From 9f6fcb11ebe2948167e982aa9ca9713b54a0e47d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 8 Nov 2012 21:28:44 -0800 Subject: [PATCH 0001/3726] Initial commit --- .gitignore | 27 +++++++++++++++++++++++++++ README.md | 2 ++ 2 files changed, 29 insertions(+) create mode 100644 .gitignore create mode 100644 README.md diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..f24cd9952d --- /dev/null +++ b/.gitignore @@ -0,0 +1,27 @@ +*.py[co] + +# Packages +*.egg +*.egg-info +dist +build +eggs +parts +bin +var +sdist +develop-eggs +.installed.cfg + +# Installer logs +pip-log.txt + +# Unit test / coverage reports +.coverage +.tox + +#Translations +*.mo + +#Mr Developer +.mr.developer.cfg diff --git a/README.md b/README.md new file mode 100644 index 0000000000..869495c22b --- /dev/null +++ b/README.md @@ -0,0 +1,2 @@ +cassandraengine +=============== \ No newline at end of file From 4890726ad57ee388b205acf6749e93b4f8679132 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 8 Nov 2012 22:26:06 -0800 Subject: [PATCH 0002/3726] initial research --- cassandraengine/__init__.py | 0 cassandraengine/columns.py | 0 cassandraengine/connection.py | 19 +++++++++++++++++++ cassandraengine/document.py | 0 cassandraengine/tests/__init__.py | 0 requirements.txt | 0 6 files changed, 19 insertions(+) create mode 100644 cassandraengine/__init__.py create mode 100644 cassandraengine/columns.py create mode 100644 cassandraengine/connection.py create mode 100644 cassandraengine/document.py create mode 100644 cassandraengine/tests/__init__.py create mode 100644 requirements.txt diff --git a/cassandraengine/__init__.py b/cassandraengine/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cassandraengine/columns.py b/cassandraengine/columns.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cassandraengine/connection.py b/cassandraengine/connection.py new file mode 100644 index 0000000000..f4d6c36c62 --- /dev/null +++ 
b/cassandraengine/connection.py @@ -0,0 +1,19 @@ +#http://pypi.python.org/pypi/cql/1.0.4 +#http://code.google.com/a/apache-extras.org/p/cassandra-dbapi2/ + +import cql + +_keyspace = 'cse_test' +_col_fam = 'colfam' + +conn = cql.connect('127.0.0.1', 9160) +cur = conn.cursor() + +#cli examples here: +#http://wiki.apache.org/cassandra/CassandraCli +cur.execute('create keyspace {};'.format(_keyspace)) + +#http://stackoverflow.com/questions/10871595/how-do-you-create-a-counter-columnfamily-with-cql3-in-cassandra +cur.execute('create column family {};'.format(_col_fam)) + +cur.execute('insert into colfam (obj_id, content) values (:obj_id, :content)', dict(obj_id=str(uuid.uuid4()), content='yo!')) diff --git a/cassandraengine/document.py b/cassandraengine/document.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cassandraengine/tests/__init__.py b/cassandraengine/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000..e69de29bb2 From bc3c26a1ef73d64d9a7835a1cd3d3dcd4f76dffd Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 9 Nov 2012 07:17:08 -0800 Subject: [PATCH 0003/3726] adding example cql and crud method stubs --- cassandraengine/connection.py | 56 ++++++++++++++++++++++++++++++++--- 1 file changed, 52 insertions(+), 4 deletions(-) diff --git a/cassandraengine/connection.py b/cassandraengine/connection.py index f4d6c36c62..27482829ff 100644 --- a/cassandraengine/connection.py +++ b/cassandraengine/connection.py @@ -1,5 +1,6 @@ #http://pypi.python.org/pypi/cql/1.0.4 #http://code.google.com/a/apache-extras.org/p/cassandra-dbapi2/ +#http://cassandra.apache.org/doc/cql/CQL.html import cql @@ -7,13 +8,60 @@ _col_fam = 'colfam' conn = cql.connect('127.0.0.1', 9160) -cur = conn.cursor() #cli examples here: #http://wiki.apache.org/cassandra/CassandraCli -cur.execute('create keyspace {};'.format(_keyspace)) +try: + conn.set_initial_keyspace(_keyspace) 
+except cql.ProgrammingError: + #http://www.datastax.com/docs/1.0/references/cql/CREATE_KEYSPACE + cur = conn.cursor() + cur.execute("create keyspace {} with strategy_class = 'SimpleStrategy' and strategy_options:replication_factor=1;".format(_keyspace)) + conn.set_initial_keyspace(_keyspace) +cur = conn.cursor() +#http://www.datastax.com/docs/1.0/dml/using_cql #http://stackoverflow.com/questions/10871595/how-do-you-create-a-counter-columnfamily-with-cql3-in-cassandra -cur.execute('create column family {};'.format(_col_fam)) +try: + cur.execute("""create table colfam (id int PRIMARY KEY);""") +except cql.ProgrammingError: + #cur.execute('drop table colfam;') + pass + +#http://www.datastax.com/docs/1.0/references/cql/INSERT +#updates/inserts do the same thing +cur.execute('insert into colfam (id, content) values (:id, :content)', dict(id=1, content='yo!')) + +cur.execute('select * from colfam WHERE id=1;') +cur.fetchone() + +cur.execute("update colfam set content='hey' where id=1;") +cur.execute('select * from colfam WHERE id=1;') +cur.fetchone() + +cur.execute('delete from colfam WHERE id=1;') +cur.execute('delete from colfam WHERE id in (1);') + +#TODO: Add alter altering existing schema functionality +#http://www.datastax.com/docs/1.0/references/cql/ALTER_COLUMNFAMILY + +def create_column_family(name): + """ + """ + +def select_row(column_family, **kwargs): + """ + """ + +def create_row(column_family, **kwargs): + """ + """ + +def update_row(column_family, **kwargs): + """ + """ + +def delete_row(column_family, **kwargs): + """ + """ -cur.execute('insert into colfam (obj_id, content) values (:obj_id, :content)', dict(obj_id=str(uuid.uuid4()), content='yo!')) From fb36faa0a6b8188ec6bb2d929fd91ae28349e4aa Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 10 Nov 2012 10:43:28 -0800 Subject: [PATCH 0004/3726] adding initial column classes, document class stubs and an exception file --- cassandraengine/columns.py | 142 ++++++++++++++++++++++++++++++++++ 
cassandraengine/document.py | 18 +++++ cassandraengine/exceptions.py | 2 + 3 files changed, 162 insertions(+) create mode 100644 cassandraengine/exceptions.py diff --git a/cassandraengine/columns.py b/cassandraengine/columns.py index e69de29bb2..6bdda146ea 100644 --- a/cassandraengine/columns.py +++ b/cassandraengine/columns.py @@ -0,0 +1,142 @@ +#column field types +import re +from uuid import uuid1, uuid4 + +class BaseColumn(object): + """ + The base column + """ + + #the cassandra type this column maps to + db_type = None + + def __init__(self, primary_key=False, db_field=None, default=None, null=False): + """ + :param primary_key: bool flag, there can be only one primary key per doc + :param db_field: the fieldname this field will map to in the database + :param default: the default value, can be a value or a callable (no args) + :param null: bool, is the field nullable? + """ + self.primary_key = primary_key + self.db_field = db_field + self.default = default + self.null = null + + def validate(self, value): + if not self.has_default: + if not self.null and value is None: + raise ValidationError('null values are not allowed') + + def to_python(self, value): + """ + Converts data from the database into python values + raises a ValidationError if the value can't be converted + """ + return value + + @property + def has_default(self): + return bool(self.default) + + def get_default(self): + if self.has_default: + if callable(self.default): + return self.default() + else: + return self.default + + def to_database(self, value): + """ + Converts python value into database value + """ + if value is None and self.has_default: + return self.get_default() + return value + + def get_column_def(self): + """ + Returns a column definition for CQL table definition + """ + dterms = [self.db_field, self.db_type] + if self.primary_key: + dterms.append('PRIMARY KEY') + return ' '.join(dterms) + + def set_db_name(self, name): + """ + Sets the column name during document class 
construction + This value will be ignored if db_field is set in __init__ + """ + self.db_field = self.db_field or name + +class Bytes(BaseColumn): + db_type = 'blob' + +class Ascii(BaseColumn): + db_type = 'ascii' + +class Text(BaseColumn): + db_type = 'text' + +class Integer(BaseColumn): + db_type = 'int' + + def validate(self, value): + super(Integer, self).validate(value) + try: + long(value) + except (TypeError, ValueError): + raise ValidationError("{} can't be converted to integral value".format(value)) + + def to_python(self, value): + self.validate(value) + return long(value) + + def to_database(self, value): + self.validate() + return value + +class DateTime(BaseColumn): + db_type = 'timestamp' + +class UUID(BaseColumn): + """ + Type 1 or 4 UUID + """ + db_type = 'uuid' + + re_uuid = re.compile(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}') + + def __init__(self, **kwargs): + super(UUID, self).__init__(**kwargs) + if 'default' not in kwargs: + self.default = uuid4 + + def validate(self, value): + super(UUID, self).validate(value) + if not self.re_uuid.match(value): + raise ValidationError("{} is not a valid uuid".format(value)) + return value + +class Boolean(BaseColumn): + db_type = 'boolean' + + def to_python(self, value): + return bool(value) + + def to_database(self, value): + return bool(value) + +class Float(BaseColumn): + db_type = 'double' + + def to_python(self, value): + return float(value) + + def to_database(self, value): + return float(value) + +class Decimal(BaseColumn): + db_type = 'decimal' + #TODO: this + diff --git a/cassandraengine/document.py b/cassandraengine/document.py index e69de29bb2..d8d72f04b7 100644 --- a/cassandraengine/document.py +++ b/cassandraengine/document.py @@ -0,0 +1,18 @@ + +class BaseDocument(object): + pass + +class DocumentMetaClass(type): + + def __new__(cls, name, bases, attrs): + """ + """ + #TODO:assert primary key exists + #TODO:get column family name + #TODO:check that all resolved column 
family names are unique + return super(DocumentMetaClass, cls).__new__(cls, name, bases, attrs) + +class Document(BaseDocument): + """ + """ + __metaclass__ = DocumentMetaClass diff --git a/cassandraengine/exceptions.py b/cassandraengine/exceptions.py new file mode 100644 index 0000000000..8265b0ceab --- /dev/null +++ b/cassandraengine/exceptions.py @@ -0,0 +1,2 @@ + +class ValidationError(BaseException): pass From f3ca4843b0fb8495a81c9d26d897aea579120e4f Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 10 Nov 2012 17:26:27 -0800 Subject: [PATCH 0005/3726] Renamed document.py to models.py defined a base model class, as well as it's metaclass --- cassandraengine/document.py | 18 ----- cassandraengine/models.py | 127 ++++++++++++++++++++++++++++++++++++ 2 files changed, 127 insertions(+), 18 deletions(-) delete mode 100644 cassandraengine/document.py create mode 100644 cassandraengine/models.py diff --git a/cassandraengine/document.py b/cassandraengine/document.py deleted file mode 100644 index d8d72f04b7..0000000000 --- a/cassandraengine/document.py +++ /dev/null @@ -1,18 +0,0 @@ - -class BaseDocument(object): - pass - -class DocumentMetaClass(type): - - def __new__(cls, name, bases, attrs): - """ - """ - #TODO:assert primary key exists - #TODO:get column family name - #TODO:check that all resolved column family names are unique - return super(DocumentMetaClass, cls).__new__(cls, name, bases, attrs) - -class Document(BaseDocument): - """ - """ - __metaclass__ = DocumentMetaClass diff --git a/cassandraengine/models.py b/cassandraengine/models.py new file mode 100644 index 0000000000..c3fce738f1 --- /dev/null +++ b/cassandraengine/models.py @@ -0,0 +1,127 @@ + +from cassandraengine import columns +from cassandraengine.exceptions import ColumnFamilyException +from cassandraengine.manager import Manager + +class BaseModel(object): + """ + The base model class, don't inherit from this, inherit from Model, defined below + """ + + db_name = None + + def 
__init__(self, **values): + #set columns from values + for k,v in values.items(): + if k in self._columns: + setattr(self, k, v) + else: + self._dynamic_columns[k] = v + + #set excluded columns to None + for k in self._columns.keys(): + if k not in values: + setattr(self, k, None) + + @classmethod + def _column_family_definition(cls): + pass + + @classmethod + def find(cls, pk): + """ Loads a document by it's primary key """ + cls.objects.find(pk) + + @property + def pk(self): + """ Returns the object's primary key, regardless of it's name """ + return getattr(self, self._pk_name) + + def validate(self): + """ Cleans and validates the field values """ + for name, col in self._columns.items(): + val = col.validate(getattr(self, name)) + setattr(self, name, val) + + def as_dict(self): + """ Returns a map of column names to cleaned values """ + values = {} + for name, col in self._columns.items(): + values[name] = col.to_database(getattr(self, name, None)) + + #TODO: merge in dynamic columns + return values + + def save(self): + is_new = self.pk is None + self.validate() + self.objects._save_instance(self) + return self + + def delete(self): + pass + + +class ModelMetaClass(type): + + def __new__(cls, name, bases, attrs): + """ + """ + #move column definitions into _columns dict + #and set default column names + _columns = {} + pk_name = None + for k,v in attrs.items(): + if isinstance(v, columns.BaseColumn): + if v.is_primary_key: + if pk_name: + raise ColumnFamilyException("More than one primary key defined for {}".format(name)) + pk_name = k + _columns[k] = attrs.pop(k) + _columns[k].set_db_name(k) + + #set primary key if it's not already defined + if not pk_name: + _columns['id'] = columns.UUID(primary_key=True) + _columns['id'].set_db_name('id') + pk_name = 'id' + + #setup pk shortcut + if pk_name != 'pk': + pk_get = lambda self: getattr(self, pk_name) + pk_set = lambda self, val: setattr(self, pk_name, val) + attrs['pk'] = property(pk_get, pk_set) + + #check for 
duplicate column names + col_names = set() + for k,v in _columns.items(): + if v.db_field in col_names: + raise ColumnFamilyException("{} defines the column {} more than once".format(name, v.db_field)) + col_names.add(k) + + #get column family name + cf_name = attrs.pop('db_name', None) or name + + #create db_name -> model name map for loading + db_map = {} + for name, col in _columns.items(): + db_map[col.db_field] = name + + attrs['_columns'] = _columns + attrs['_db_map'] = db_map + attrs['_pk_name'] = pk_name + attrs['_dynamic_columns'] = {} + + klass = super(ModelMetaClass, cls).__new__(cls, name, bases, attrs) + klass.objects = Manager(klass) + return klass + + +class Model(BaseModel): + """ + the db name for the column family can be set as the attribute db_name, or + it will be genertaed from the class name + """ + __metaclass__ = ModelMetaClass + + From 44c1232f8ad8567b94e8c4217cd8474b4ebe010b Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 10 Nov 2012 17:27:10 -0800 Subject: [PATCH 0006/3726] added validation to columns starting to clean up connection.py adding exceptions --- cassandraengine/columns.py | 51 +++++++++++++++++++---------- cassandraengine/connection.py | 60 +++++++++++++++-------------------- cassandraengine/exceptions.py | 4 ++- 3 files changed, 62 insertions(+), 53 deletions(-) diff --git a/cassandraengine/columns.py b/cassandraengine/columns.py index 6bdda146ea..7c90f5584c 100644 --- a/cassandraengine/columns.py +++ b/cassandraengine/columns.py @@ -2,6 +2,8 @@ import re from uuid import uuid1, uuid4 +from cassandraengine.exceptions import ValidationError + class BaseColumn(object): """ The base column @@ -23,9 +25,16 @@ def __init__(self, primary_key=False, db_field=None, default=None, null=False): self.null = null def validate(self, value): - if not self.has_default: - if not self.null and value is None: + """ + Returns a cleaned and validated value. 
Raises a ValidationError + if there's a problem + """ + if value is None: + if self.has_default: + return self.get_default() + elif not self.null: raise ValidationError('null values are not allowed') + return value def to_python(self, value): """ @@ -38,6 +47,10 @@ def to_python(self, value): def has_default(self): return bool(self.default) + @property + def is_primary_key(self): + return self.primary_key + def get_default(self): if self.has_default: if callable(self.default): @@ -82,19 +95,17 @@ class Integer(BaseColumn): db_type = 'int' def validate(self, value): - super(Integer, self).validate(value) + val = super(Integer, self).validate(value) try: - long(value) + return long(val) except (TypeError, ValueError): raise ValidationError("{} can't be converted to integral value".format(value)) def to_python(self, value): - self.validate(value) - return long(value) + return self.validate(value) def to_database(self, value): - self.validate() - return value + return self.validate(value) class DateTime(BaseColumn): db_type = 'timestamp' @@ -107,16 +118,16 @@ class UUID(BaseColumn): re_uuid = re.compile(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}') - def __init__(self, **kwargs): - super(UUID, self).__init__(**kwargs) - if 'default' not in kwargs: - self.default = uuid4 + def __init__(self, default=lambda:uuid4(), **kwargs): + super(UUID, self).__init__(default=default, **kwargs) def validate(self, value): - super(UUID, self).validate(value) - if not self.re_uuid.match(value): + val = super(UUID, self).validate(value) + from uuid import UUID as _UUID + if isinstance(val, _UUID): return val + if not self.re_uuid.match(val): raise ValidationError("{} is not a valid uuid".format(value)) - return value + return _UUID(val) class Boolean(BaseColumn): db_type = 'boolean' @@ -130,11 +141,17 @@ def to_database(self, value): class Float(BaseColumn): db_type = 'double' + def validate(self, value): + try: + return float(value) + except (TypeError, ValueError): + 
raise ValidationError("{} is not a valid float".format(value)) + def to_python(self, value): - return float(value) + return self.validate(value) def to_database(self, value): - return float(value) + return self.validate(value) class Decimal(BaseColumn): db_type = 'decimal' diff --git a/cassandraengine/connection.py b/cassandraengine/connection.py index 27482829ff..3cebc3acbf 100644 --- a/cassandraengine/connection.py +++ b/cassandraengine/connection.py @@ -1,35 +1,44 @@ #http://pypi.python.org/pypi/cql/1.0.4 -#http://code.google.com/a/apache-extras.org/p/cassandra-dbapi2/ +#http://code.google.com/a/apache-extras.org/p/cassandra-dbapi2 / #http://cassandra.apache.org/doc/cql/CQL.html import cql -_keyspace = 'cse_test' -_col_fam = 'colfam' - -conn = cql.connect('127.0.0.1', 9160) +_keyspace = 'cassengine_test' + +_conn = None +def get_connection(): + global _conn + if _conn is None: + _conn = cql.connect('127.0.0.1', 9160) + _conn.set_cql_version('3.0.0') + try: + _conn.set_initial_keyspace(_keyspace) + except cql.ProgrammingError, e: + #http://www.datastax.com/docs/1.0/references/cql/CREATE_KEYSPACE + cur = _conn.cursor() + cur.execute("""create keyspace {} + with strategy_class = 'SimpleStrategy' + and strategy_options:replication_factor=1;""".format(_keyspace)) + _conn.set_initial_keyspace(_keyspace) + return _conn #cli examples here: #http://wiki.apache.org/cassandra/CassandraCli -try: - conn.set_initial_keyspace(_keyspace) -except cql.ProgrammingError: - #http://www.datastax.com/docs/1.0/references/cql/CREATE_KEYSPACE - cur = conn.cursor() - cur.execute("create keyspace {} with strategy_class = 'SimpleStrategy' and strategy_options:replication_factor=1;".format(_keyspace)) - conn.set_initial_keyspace(_keyspace) -cur = conn.cursor() - #http://www.datastax.com/docs/1.0/dml/using_cql #http://stackoverflow.com/questions/10871595/how-do-you-create-a-counter-columnfamily-with-cql3-in-cassandra +""" try: - cur.execute("""create table colfam (id int PRIMARY KEY);""") + 
cur.execute("create table colfam (id int PRIMARY KEY);") except cql.ProgrammingError: #cur.execute('drop table colfam;') pass +""" #http://www.datastax.com/docs/1.0/references/cql/INSERT #updates/inserts do the same thing + +""" cur.execute('insert into colfam (id, content) values (:id, :content)', dict(id=1, content='yo!')) cur.execute('select * from colfam WHERE id=1;') @@ -41,27 +50,8 @@ cur.execute('delete from colfam WHERE id=1;') cur.execute('delete from colfam WHERE id in (1);') +""" #TODO: Add alter altering existing schema functionality #http://www.datastax.com/docs/1.0/references/cql/ALTER_COLUMNFAMILY -def create_column_family(name): - """ - """ - -def select_row(column_family, **kwargs): - """ - """ - -def create_row(column_family, **kwargs): - """ - """ - -def update_row(column_family, **kwargs): - """ - """ - -def delete_row(column_family, **kwargs): - """ - """ - diff --git a/cassandraengine/exceptions.py b/cassandraengine/exceptions.py index 8265b0ceab..a68518875e 100644 --- a/cassandraengine/exceptions.py +++ b/cassandraengine/exceptions.py @@ -1,2 +1,4 @@ - +#cassandraengine exceptions +class ColumnFamilyException(BaseException): pass class ValidationError(BaseException): pass + From ec1a3b02f4482f674fb91faf3a5359249ed2113a Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 10 Nov 2012 17:27:54 -0800 Subject: [PATCH 0007/3726] adding object manager adding queryset class with insert, select, and table create and drop --- cassandraengine/manager.py | 80 ++++++++++++++++++++++ cassandraengine/query.py | 137 +++++++++++++++++++++++++++++++++++++ 2 files changed, 217 insertions(+) create mode 100644 cassandraengine/manager.py create mode 100644 cassandraengine/query.py diff --git a/cassandraengine/manager.py b/cassandraengine/manager.py new file mode 100644 index 0000000000..9dc501b244 --- /dev/null +++ b/cassandraengine/manager.py @@ -0,0 +1,80 @@ +#manager class + +from cassandraengine.query import QuerySet + +class Manager(object): + + def 
__init__(self, model): + super(Manager, self).__init__() + self.model = model + + @property + def column_family_name(self): + """ + Returns the column family name if it's been defined + otherwise, it creates it from the module and class name + """ + if self.model.db_name: + return self.model.db_name + cf_name = self.model.__module__ + '.' + self.model.__name__ + cf_name = cf_name.replace('.', '_') + #trim to less than 48 characters or cassandra will complain + cf_name = cf_name[-48:] + return cf_name + + def column_family_definition(self): + """ + Generates a definition used for tale creation + """ + + def find(self, pk): + """ + Returns the row corresponding to the primary key value given + """ + values = QuerySet(self.model).find(pk) + #change the column names to model names + #in case they are different + field_dict = {} + db_map = self.model._db_map + for key, val in values.items(): + if key in db_map: + field_dict[db_map[key]] = val + else: + field_dict[key] = val + return self.model(**field_dict) + + def all(self): + return QuerySet(self.model).all() + + def filter(self, **kwargs): + return QuerySet(self.model).filter(**kwargs) + + def exclude(self, **kwargs): + return QuerySet(self.model).exclude(**kwargs) + + def create(self, **kwargs): + return self.model(**kwargs).save() + + def _save_instance(self, instance): + """ + The business end of save, this is called by the models + save method and calls the Query save method. 
This should + only be called by the model saving itself + """ + QuerySet(self.model).save(instance) + + def _create_column_family(self): + QuerySet(self.model)._create_column_family() + + def _delete_column_family(self): + QuerySet(self.model)._delete_column_family() + + def delete(self, **kwargs): + pass + + def __call__(self, **kwargs): + """ + filter shortcut + """ + return self.filter(**kwargs) + diff --git a/cassandraengine/query.py b/cassandraengine/query.py new file mode 100644 index 0000000000..bb0532d86b --- /dev/null +++ b/cassandraengine/query.py @@ -0,0 +1,137 @@ +from cassandraengine.connection import get_connection + +class QuerySet(object): + #TODO: querysets should be immutable + #TODO: querysets should be executed lazily + #TODO: conflicting filter args should raise exception unless a force kwarg is supplied + + def __init__(self, model, query={}): + super(QuerySet, self).__init__() + self.model = model + self.column_family_name = self.model.objects.column_family_name + + self._cursor = None + + #----query generation / execution---- + def _execute_query(self): + pass + + def _generate_querystring(self): + pass + + @property + def cursor(self): + if self._cursor is None: + self._cursor = self._execute_query() + return self._cursor + + #----Reads------ + def __iter__(self): + pass + + def next(self): + pass + + def first(self): + conn = get_connection() + cur = conn.cursor() + pass + + def all(self): + pass + + def filter(self, **kwargs): + pass + + def exclude(self, **kwargs): + pass + + def find(self, pk): + """ + loads one document identified by it's primary key + """ + qs = 'SELECT * FROM {column_family} WHERE {pk_name}=:{pk_name}' + qs = qs.format(column_family=self.column_family_name, + pk_name=self.model._pk_name) + conn = get_connection() + cur = conn.cursor() + cur.execute(qs, {self.model._pk_name:pk}) + values = cur.fetchone() + names = [i[0] for i in cur.description] + value_dict = dict(zip(names, values)) + return value_dict + + + 
#----writes---- + def save(self, instance): + """ + Creates / updates a row. + This is a blind insert call. + All validation and cleaning needs to happen + prior to calling this. + """ + assert type(instance) == self.model + #organize data + value_pairs = [] + + #get pk + col = self.model._columns[self.model._pk_name] + values = instance.as_dict() + value_pairs += [(col.db_field, values.get(self.model._pk_name))] + + #get defined fields and their column names + for name, col in self.model._columns.items(): + if col.is_primary_key: continue + value_pairs += [(col.db_field, values.get(name))] + + #add dynamic fields + for key, val in values.items(): + if key in self.model._columns: continue + value_pairs += [(key, val)] + + #construct query string + field_names = zip(*value_pairs)[0] + field_values = dict(value_pairs) + qs = ["INSERT INTO {}".format(self.column_family_name)] + qs += ["({})".format(', '.join(field_names))] + qs += ['VALUES'] + qs += ["({})".format(', '.join([':'+f for f in field_names]))] + qs = ' '.join(qs) + + conn = get_connection() + cur = conn.cursor() + cur.execute(qs, field_values) + + def _create_column_family(self): + #construct query string + qs = ['CREATE TABLE {}'.format(self.column_family_name)] + + #add column types + qtypes = [] + def add_column(col): + s = '{} {}'.format(col.db_field, col.db_type) + if col.primary_key: s += ' PRIMARY KEY' + qtypes.append(s) + add_column(self.model._columns[self.model._pk_name]) + for name, col in self.model._columns.items(): + if col.primary_key: continue + add_column(col) + + qs += ['({})'.format(', '.join(qtypes))] + qs = ' '.join(qs) + + #add primary key + conn = get_connection() + cur = conn.cursor() + try: + cur.execute(qs) + except BaseException, e: + if 'Cannot add already existing column family' not in e.message: + raise + + def _delete_column_family(self): + conn = get_connection() + cur = conn.cursor() + cur.execute('drop table {};'.format(self.column_family_name)) + + From 
2aaf1650f32a6e4511c5b0cdcf95e25ebcf0b175 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 10 Nov 2012 17:28:55 -0800 Subject: [PATCH 0008/3726] added some unit tests, updated the readme and added requirements.txt --- README.md | 16 ++++++++- cassandraengine/tests/base.py | 11 +++++++ cassandraengine/tests/columns/__init__.py | 0 cassandraengine/tests/model/__init__.py | 0 .../tests/model/test_class_construction.py | 33 +++++++++++++++++++ cassandraengine/tests/model/test_model_io.py | 26 +++++++++++++++ requirements.txt | 1 + 7 files changed, 86 insertions(+), 1 deletion(-) create mode 100644 cassandraengine/tests/base.py create mode 100644 cassandraengine/tests/columns/__init__.py create mode 100644 cassandraengine/tests/model/__init__.py create mode 100644 cassandraengine/tests/model/test_class_construction.py create mode 100644 cassandraengine/tests/model/test_model_io.py diff --git a/README.md b/README.md index 869495c22b..0e174ce9ad 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,16 @@ cassandraengine -=============== \ No newline at end of file +=============== + +Django ORM / Mongoengine style ORM for Cassandra + +In it's current state you can define column families, create and delete column families +based on your model definiteions, save models and retrieve models by their primary keys. + +That's about it. Also, there are only 2 tests and the CQL stuff is very simplistic at this point. 
+ +##TODO +* dynamic column support +* return None when row isn't found in find() +* tests +* query functionality +* nice column and model class __repr__ diff --git a/cassandraengine/tests/base.py b/cassandraengine/tests/base.py new file mode 100644 index 0000000000..ac1f31d8f1 --- /dev/null +++ b/cassandraengine/tests/base.py @@ -0,0 +1,11 @@ +from unittest import TestCase + +class BaseCassEngTestCase(TestCase): + + def assertHasAttr(self, obj, attr): + self.assertTrue(hasattr(obj, attr), + "{} doesn't have attribute: {}".format(obj, attr)) + + def assertNotHasAttr(self, obj, attr): + self.assertFalse(hasattr(obj, attr), + "{} shouldn't have the attribute: {}".format(obj, attr)) diff --git a/cassandraengine/tests/columns/__init__.py b/cassandraengine/tests/columns/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cassandraengine/tests/model/__init__.py b/cassandraengine/tests/model/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cassandraengine/tests/model/test_class_construction.py b/cassandraengine/tests/model/test_class_construction.py new file mode 100644 index 0000000000..c02fc4e2bd --- /dev/null +++ b/cassandraengine/tests/model/test_class_construction.py @@ -0,0 +1,33 @@ +from cassandraengine.tests.base import BaseCassEngTestCase + +from cassandraengine.models import Model +from cassandraengine import columns + +class TestModelClassFunction(BaseCassEngTestCase): + + def test_column_attributes_handled_correctly(self): + """ + Tests that column attributes are moved to a _columns dict + and replaced with simple value attributes + """ + + class TestModel(Model): + text = columns.Text() + + self.assertHasAttr(TestModel, '_columns') + self.assertNotHasAttr(TestModel, 'id') + self.assertNotHasAttr(TestModel, 'text') + + inst = TestModel() + self.assertHasAttr(inst, 'id') + self.assertHasAttr(inst, 'text') + self.assertIsNone(inst.id) + self.assertIsNone(inst.text) + + +class 
TestModelValidation(BaseCassEngTestCase): + pass + +class TestModelSerialization(BaseCassEngTestCase): + pass + diff --git a/cassandraengine/tests/model/test_model_io.py b/cassandraengine/tests/model/test_model_io.py new file mode 100644 index 0000000000..4b73c26e0c --- /dev/null +++ b/cassandraengine/tests/model/test_model_io.py @@ -0,0 +1,26 @@ +from cassandraengine.tests.base import BaseCassEngTestCase + +from cassandraengine.models import Model +from cassandraengine import columns + +class TestModel(Model): + count = columns.Integer() + text = columns.Text() + +class TestModelIO(BaseCassEngTestCase): + + def setUp(self): + super(TestModelIO, self).setUp() + TestModel.objects._create_column_family() + + def tearDown(self): + super(TestModelIO, self).tearDown() + TestModel.objects._delete_column_family() + + def test_model_save_and_load(self): + tm = TestModel.objects.create(count=8, text='123456789') + tm2 = TestModel.objects.find(tm.pk) + + for cname in tm._columns.keys(): + self.assertEquals(getattr(tm, cname), getattr(tm2, cname)) + diff --git a/requirements.txt b/requirements.txt index e69de29bb2..b6734c8844 100644 --- a/requirements.txt +++ b/requirements.txt @@ -0,0 +1 @@ +cql==1.2.0 From 18f52886e7b570ee7c1fe0dbc6782d73abe7664a Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 10 Nov 2012 19:33:22 -0800 Subject: [PATCH 0009/3726] renamed some exceptions --- cassandraengine/__init__.py | 3 +++ cassandraengine/columns.py | 3 --- cassandraengine/exceptions.py | 2 +- cassandraengine/models.py | 10 ++++++---- 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/cassandraengine/__init__.py b/cassandraengine/__init__.py index e69de29bb2..af95d0b003 100644 --- a/cassandraengine/__init__.py +++ b/cassandraengine/__init__.py @@ -0,0 +1,3 @@ +from cassandraengine.columns import * +from cassandraengine.models import Model + diff --git a/cassandraengine/columns.py b/cassandraengine/columns.py index 7c90f5584c..747cfba97c 100644 --- 
a/cassandraengine/columns.py +++ b/cassandraengine/columns.py @@ -5,9 +5,6 @@ from cassandraengine.exceptions import ValidationError class BaseColumn(object): - """ - The base column - """ #the cassandra type this column maps to db_type = None diff --git a/cassandraengine/exceptions.py b/cassandraengine/exceptions.py index a68518875e..4b20f7a334 100644 --- a/cassandraengine/exceptions.py +++ b/cassandraengine/exceptions.py @@ -1,4 +1,4 @@ #cassandraengine exceptions -class ColumnFamilyException(BaseException): pass +class ModelException(BaseException): pass class ValidationError(BaseException): pass diff --git a/cassandraengine/models.py b/cassandraengine/models.py index c3fce738f1..4e5bd9d3ec 100644 --- a/cassandraengine/models.py +++ b/cassandraengine/models.py @@ -1,6 +1,6 @@ from cassandraengine import columns -from cassandraengine.exceptions import ColumnFamilyException +from cassandraengine.exceptions import ModelException from cassandraengine.manager import Manager class BaseModel(object): @@ -8,6 +8,8 @@ class BaseModel(object): The base model class, don't inherit from this, inherit from Model, defined below """ + #table names will be generated automatically from it's model name and package + #however, you can alse define them manually here db_name = None def __init__(self, **values): @@ -75,7 +77,7 @@ def __new__(cls, name, bases, attrs): if isinstance(v, columns.BaseColumn): if v.is_primary_key: if pk_name: - raise ColumnFamilyException("More than one primary key defined for {}".format(name)) + raise ModelException("More than one primary key defined for {}".format(name)) pk_name = k _columns[k] = attrs.pop(k) _columns[k].set_db_name(k) @@ -96,8 +98,8 @@ def __new__(cls, name, bases, attrs): col_names = set() for k,v in _columns.items(): if v.db_field in col_names: - raise ColumnFamilyException("{} defines the column {} more than once".format(name, v.db_field)) - col_names.add(k) + raise ModelException("{} defines the column {} more than once".format(name, 
v.db_field)) + col_names.add(v.db_field) #get column family name cf_name = attrs.pop('db_name', None) or name From cfebd5a32d3b0628c461d96e09c13d297c42da2d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 10 Nov 2012 19:34:16 -0800 Subject: [PATCH 0010/3726] added more tests around the metaclass behavior --- .../tests/model/test_class_construction.py | 35 ++++++++++++++++--- 1 file changed, 31 insertions(+), 4 deletions(-) diff --git a/cassandraengine/tests/model/test_class_construction.py b/cassandraengine/tests/model/test_class_construction.py index c02fc4e2bd..cc24a8c08b 100644 --- a/cassandraengine/tests/model/test_class_construction.py +++ b/cassandraengine/tests/model/test_class_construction.py @@ -1,9 +1,13 @@ from cassandraengine.tests.base import BaseCassEngTestCase +from cassandraengine.exceptions import ModelException from cassandraengine.models import Model from cassandraengine import columns class TestModelClassFunction(BaseCassEngTestCase): + """ + Tests verifying the behavior of the Model metaclass + """ def test_column_attributes_handled_correctly(self): """ @@ -24,10 +28,33 @@ class TestModel(Model): self.assertIsNone(inst.id) self.assertIsNone(inst.text) + def test_multiple_primary_keys_fail(self): + """Test attempting to define multiple primary keys fails""" + with self.assertRaises(ModelException): + class MultiPK(Model): + id1 = columns.Integer(primary_key=True) + id2 = columns.Integer(primary_key=True) -class TestModelValidation(BaseCassEngTestCase): - pass + def test_db_map(self): + """ + Tests that the db_map is properly defined + -the db_map allows columns + """ + class WildDBNames(Model): + content = columns.Text(db_field='words_and_whatnot') + numbers = columns.Integer(db_field='integers_etc') + + db_map = WildDBNames._db_map + self.assertEquals(db_map['words_and_whatnot'], 'content') + self.assertEquals(db_map['integers_etc'], 'numbers') + + def test_attempting_to_make_duplicate_column_names_fails(self): + """ + Tests that 
trying to create conflicting db column names will fail + """ -class TestModelSerialization(BaseCassEngTestCase): - pass + with self.assertRaises(ModelException): + class BadNames(Model): + words = columns.Text() + content = columns.Text(db_field='words') From b6042ac57ad62d7f73600588061cb194e114ba83 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 10 Nov 2012 21:12:47 -0800 Subject: [PATCH 0011/3726] adding dynamic columns to models (which aren't working yet) working on the QuerySet class adding additional tests around model saving and loading --- cassandraengine/columns.py | 2 +- cassandraengine/models.py | 18 ++++--- cassandraengine/query.py | 54 ++++++++++++++----- .../tests/columns/test_validation.py | 16 ++++++ cassandraengine/tests/model/test_model_io.py | 45 ++++++++++++++-- .../tests/model/test_validation.py | 0 6 files changed, 109 insertions(+), 26 deletions(-) create mode 100644 cassandraengine/tests/columns/test_validation.py create mode 100644 cassandraengine/tests/model/test_validation.py diff --git a/cassandraengine/columns.py b/cassandraengine/columns.py index 747cfba97c..aa7a4b355b 100644 --- a/cassandraengine/columns.py +++ b/cassandraengine/columns.py @@ -14,7 +14,7 @@ def __init__(self, primary_key=False, db_field=None, default=None, null=False): :param primary_key: bool flag, there can be only one primary key per doc :param db_field: the fieldname this field will map to in the database :param default: the default value, can be a value or a callable (no args) - :param null: bool, is the field nullable? + :param null: boolean, is the field nullable? 
""" self.primary_key = primary_key self.db_field = db_field diff --git a/cassandraengine/models.py b/cassandraengine/models.py index 4e5bd9d3ec..115008960f 100644 --- a/cassandraengine/models.py +++ b/cassandraengine/models.py @@ -25,10 +25,6 @@ def __init__(self, **values): if k not in values: setattr(self, k, None) - @classmethod - def _column_family_definition(cls): - pass - @classmethod def find(cls, pk): """ Loads a document by it's primary key """ @@ -39,6 +35,16 @@ def pk(self): """ Returns the object's primary key, regardless of it's name """ return getattr(self, self._pk_name) + #dynamic column methods + def __getitem__(self, key): + return self._dynamic_columns[key] + + def __setitem__(self, key, val): + self._dynamic_columns[key] = val + + def __delitem__(self, key): + del self._dynamic_columns[key] + def validate(self): """ Cleans and validates the field values """ for name, col in self._columns.items(): @@ -47,11 +53,9 @@ def validate(self): def as_dict(self): """ Returns a map of column names to cleaned values """ - values = {} + values = self._dynamic_columns or {} for name, col in self._columns.items(): values[name] = col.to_database(getattr(self, name, None)) - - #TODO: merge in dynamic columns return values def save(self): diff --git a/cassandraengine/query.py b/cassandraengine/query.py index bb0532d86b..76149f633a 100644 --- a/cassandraengine/query.py +++ b/cassandraengine/query.py @@ -1,3 +1,5 @@ +import copy + from cassandraengine.connection import get_connection class QuerySet(object): @@ -5,16 +7,18 @@ class QuerySet(object): #TODO: querysets should be executed lazily #TODO: conflicting filter args should raise exception unless a force kwarg is supplied - def __init__(self, model, query={}): + def __init__(self, model, query_args={}): super(QuerySet, self).__init__() self.model = model + self.query_args = query_args self.column_family_name = self.model.objects.column_family_name self._cursor = None #----query generation / execution---- def 
_execute_query(self): - pass + conn = get_connection() + self._cursor = conn.cursor() def _generate_querystring(self): pass @@ -27,39 +31,61 @@ def cursor(self): #----Reads------ def __iter__(self): - pass + if self._cursor is None: + self._execute_query() + return self + + def _get_next(self): + """ + Gets the next cursor result + Returns a db_field->value dict + """ + cur = self._cursor + values = cur.fetchone() + if values is None: return None + names = [i[0] for i in cur.description] + value_dict = dict(zip(names, values)) + return value_dict def next(self): - pass + values = self._get_next() + if values is None: raise StopIteration + return values def first(self): - conn = get_connection() - cur = conn.cursor() pass def all(self): - pass + return QuerySet(self.model) def filter(self, **kwargs): - pass + qargs = copy.deepcopy(self.query_args) + qargs.update(kwargs) + return QuerySet(self.model, query_args=qargs) def exclude(self, **kwargs): + """ + Need to invert the logic for all kwargs + """ pass + def count(self): + """ + Returns the number of rows matched by this query + """ + def find(self, pk): """ loads one document identified by it's primary key """ + #TODO: make this a convenience wrapper of the filter method qs = 'SELECT * FROM {column_family} WHERE {pk_name}=:{pk_name}' qs = qs.format(column_family=self.column_family_name, pk_name=self.model._pk_name) conn = get_connection() - cur = conn.cursor() - cur.execute(qs, {self.model._pk_name:pk}) - values = cur.fetchone() - names = [i[0] for i in cur.description] - value_dict = dict(zip(names, values)) - return value_dict + self._cursor = conn.cursor() + self._cursor.execute(qs, {self.model._pk_name:pk}) + return self._get_next() #----writes---- diff --git a/cassandraengine/tests/columns/test_validation.py b/cassandraengine/tests/columns/test_validation.py new file mode 100644 index 0000000000..e42519cc3b --- /dev/null +++ b/cassandraengine/tests/columns/test_validation.py @@ -0,0 +1,16 @@ +#tests the 
behavior of the column classes + +from cassandraengine.tests.base import BaseCassEngTestCase + +from cassandraengine.columns import BaseColumn +from cassandraengine.columns import Bytes +from cassandraengine.columns import Ascii +from cassandraengine.columns import Text +from cassandraengine.columns import Integer +from cassandraengine.columns import DateTime +from cassandraengine.columns import UUID +from cassandraengine.columns import Boolean +from cassandraengine.columns import Float +from cassandraengine.columns import Decimal + + diff --git a/cassandraengine/tests/model/test_model_io.py b/cassandraengine/tests/model/test_model_io.py index 4b73c26e0c..7680a7897d 100644 --- a/cassandraengine/tests/model/test_model_io.py +++ b/cassandraengine/tests/model/test_model_io.py @@ -1,3 +1,4 @@ +from unittest import skip from cassandraengine.tests.base import BaseCassEngTestCase from cassandraengine.models import Model @@ -7,20 +8,56 @@ class TestModel(Model): count = columns.Integer() text = columns.Text() +#class TestModel2(Model): + class TestModelIO(BaseCassEngTestCase): def setUp(self): super(TestModelIO, self).setUp() TestModel.objects._create_column_family() - def tearDown(self): - super(TestModelIO, self).tearDown() - TestModel.objects._delete_column_family() - def test_model_save_and_load(self): + """ + Tests that models can be saved and retrieved + """ tm = TestModel.objects.create(count=8, text='123456789') tm2 = TestModel.objects.find(tm.pk) for cname in tm._columns.keys(): self.assertEquals(getattr(tm, cname), getattr(tm2, cname)) + def test_model_updating_works_properly(self): + """ + Tests that subsequent saves after initial model creation work + """ + tm = TestModel.objects.create(count=8, text='123456789') + + tm.count = 100 + tm.save() + + tm2 = TestModel.objects.find(tm.pk) + self.assertEquals(tm.count, tm2.count) + + def test_nullable_columns_are_saved_properly(self): + """ + Tests that nullable columns save without any trouble + """ + + @skip + def 
test_dynamic_columns(self): + """ + Tests that items put into dynamic columns are saved and retrieved properly + + Note: seems I've misunderstood how arbitrary column names work in Cassandra + skipping for now + """ + #TODO:Fix this + tm = TestModel(count=8, text='123456789') + tm['other'] = 'something' + tm['number'] = 5 + tm.save() + + tm2 = TestModel.objects.find(tm.pk) + self.assertEquals(tm['other'], tm2['other']) + self.assertEquals(tm['number'], tm2['number']) + diff --git a/cassandraengine/tests/model/test_validation.py b/cassandraengine/tests/model/test_validation.py new file mode 100644 index 0000000000..e69de29bb2 From 4764b492eb44b1a2c3bcdc38b80b71704c39bb58 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 11 Nov 2012 11:43:05 -0800 Subject: [PATCH 0012/3726] adding single instance delete method and supporting unit test adding column type stubs updating readme --- README.md | 12 ++++++-- cassandraengine/columns.py | 24 +++++++++++++++- cassandraengine/manager.py | 30 +++++++++++--------- cassandraengine/models.py | 3 +- cassandraengine/query.py | 28 +++++++++++++----- cassandraengine/tests/model/test_model_io.py | 9 ++++++ 6 files changed, 81 insertions(+), 25 deletions(-) diff --git a/README.md b/README.md index 0e174ce9ad..136523416c 100644 --- a/README.md +++ b/README.md @@ -1,16 +1,22 @@ cassandraengine =============== -Django ORM / Mongoengine style ORM for Cassandra +Python Cassandra ORM in the style of django / mongoengine In it's current state you can define column families, create and delete column families based on your model definiteions, save models and retrieve models by their primary keys. -That's about it. Also, there are only 2 tests and the CQL stuff is very simplistic at this point. +That's about it. Also, the CQL stuff is pretty simple at this point. ##TODO +* Complex queries (class Q(object)) +* Match column names to mongoengine field names? +* mongoengine fields? 
URLField, EmbeddedDocument, ListField, DictField +* column ttl? +* ForeignKey/DBRef fields? * dynamic column support -* return None when row isn't found in find() * tests * query functionality * nice column and model class __repr__ + + diff --git a/cassandraengine/columns.py b/cassandraengine/columns.py index aa7a4b355b..67f697ea25 100644 --- a/cassandraengine/columns.py +++ b/cassandraengine/columns.py @@ -106,6 +106,9 @@ def to_database(self, value): class DateTime(BaseColumn): db_type = 'timestamp' + def __init__(self, **kwargs): + super(DateTime, self).__init__(**kwargs) + raise NotImplementedError class UUID(BaseColumn): """ @@ -153,4 +156,23 @@ def to_database(self, value): class Decimal(BaseColumn): db_type = 'decimal' #TODO: this - + def __init__(self, **kwargs): + super(DateTime, self).__init__(**kwargs) + raise NotImplementedError + +class Counter(BaseColumn): + def __init__(self, **kwargs): + super(DateTime, self).__init__(**kwargs) + raise NotImplementedError + +#TODO: research supercolumns +#http://wiki.apache.org/cassandra/DataModel +class List(BaseColumn): + def __init__(self, **kwargs): + super(DateTime, self).__init__(**kwargs) + raise NotImplementedError + +class Dict(BaseColumn): + def __init__(self, **kwargs): + super(DateTime, self).__init__(**kwargs) + raise NotImplementedError diff --git a/cassandraengine/manager.py b/cassandraengine/manager.py index 9dc501b244..3f7ec7f66e 100644 --- a/cassandraengine/manager.py +++ b/cassandraengine/manager.py @@ -21,17 +21,20 @@ def column_family_name(self): #trim to less than 48 characters or cassandra will complain cf_name = cf_name[-48:] return cf_name - - def column_family_definition(self): + + def __call__(self, **kwargs): """ - Generates a definition used for tale creation + filter shortcut """ + return self.filter(**kwargs) def find(self, pk): """ Returns the row corresponding to the primary key value given """ values = QuerySet(self.model).find(pk) + if values is None: return + #change the column 
names to model names #in case they are different field_dict = {} @@ -55,6 +58,10 @@ def exclude(self, **kwargs): def create(self, **kwargs): return self.model(**kwargs).save() + def delete(self, **kwargs): + pass + + #----single instance methods---- def _save_instance(self, instance): """ The business end of save, this is called by the models @@ -63,18 +70,15 @@ def _save_instance(self, instance): """ QuerySet(self.model).save(instance) + def _delete_instance(self, instance): + """ + Deletes a single instance + """ + QuerySet(self.model).delete_instance(instance) + + #----column family create/delete---- def _create_column_family(self): QuerySet(self.model)._create_column_family() def _delete_column_family(self): QuerySet(self.model)._delete_column_family() - - def delete(self, **kwargs): - pass - - def __call__(self, **kwargs): - """ - filter shortcut - """ - return self.filter(**kwargs) - diff --git a/cassandraengine/models.py b/cassandraengine/models.py index 115008960f..040682d4f8 100644 --- a/cassandraengine/models.py +++ b/cassandraengine/models.py @@ -65,7 +65,8 @@ def save(self): return self def delete(self): - pass + """ Deletes this instance """ + self.objects._delete_instance(self) class ModelMetaClass(type): diff --git a/cassandraengine/query.py b/cassandraengine/query.py index 76149f633a..025d135ab6 100644 --- a/cassandraengine/query.py +++ b/cassandraengine/query.py @@ -42,7 +42,7 @@ def _get_next(self): """ cur = self._cursor values = cur.fetchone() - if values is None: return None + if values is None: return names = [i[0] for i in cur.description] value_dict = dict(zip(names, values)) return value_dict @@ -64,15 +64,12 @@ def filter(self, **kwargs): return QuerySet(self.model, query_args=qargs) def exclude(self, **kwargs): - """ - Need to invert the logic for all kwargs - """ + """ Need to invert the logic for all kwargs """ pass def count(self): - """ - Returns the number of rows matched by this query - """ + """ Returns the number of rows matched 
by this query """ + qs = 'SELECT COUNT(*) FROM {}'.format(self.column_family_name) def find(self, pk): """ @@ -128,6 +125,23 @@ def save(self, instance): cur = conn.cursor() cur.execute(qs, field_values) + #----delete--- + def delete(self): + """ + Deletes the contents of a query + """ + + def delete_instance(self, instance): + """ Deletes one instance """ + pk_name = self.model._pk_name + qs = ['DELETE FROM {}'.format(self.column_family_name)] + qs += ['WHERE {0}=:{0}'.format(pk_name)] + qs = ' '.join(qs) + + conn = get_connection() + cur = conn.cursor() + cur.execute(qs, {pk_name:instance.pk}) + def _create_column_family(self): #construct query string qs = ['CREATE TABLE {}'.format(self.column_family_name)] diff --git a/cassandraengine/tests/model/test_model_io.py b/cassandraengine/tests/model/test_model_io.py index 7680a7897d..8adf46913c 100644 --- a/cassandraengine/tests/model/test_model_io.py +++ b/cassandraengine/tests/model/test_model_io.py @@ -38,6 +38,15 @@ def test_model_updating_works_properly(self): tm2 = TestModel.objects.find(tm.pk) self.assertEquals(tm.count, tm2.count) + def test_model_deleting_works_properly(self): + """ + Tests that an instance's delete method deletes the instance + """ + tm = TestModel.objects.create(count=8, text='123456789') + tm.delete() + tm2 = TestModel.objects.find(tm.pk) + self.assertIsNone(tm2) + def test_nullable_columns_are_saved_properly(self): """ Tests that nullable columns save without any trouble From 142ceb184cbcea853182e6a20511a8c8b17cc6aa Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 11 Nov 2012 16:37:26 -0800 Subject: [PATCH 0013/3726] modified model metaclass to replace column definitions with properties that modify the columns value member instead of removing them until instantiation --- cassandraengine/columns.py | 45 +++++++++++++++---- cassandraengine/models.py | 27 ++++++----- .../tests/model/test_class_construction.py | 4 +- 3 files changed, 55 insertions(+), 21 deletions(-) diff --git 
a/cassandraengine/columns.py b/cassandraengine/columns.py index 67f697ea25..20f54ba8d7 100644 --- a/cassandraengine/columns.py +++ b/cassandraengine/columns.py @@ -21,6 +21,8 @@ def __init__(self, primary_key=False, db_field=None, default=None, null=False): self.default = default self.null = null + self.value = None + def validate(self, value): """ Returns a cleaned and validated value. Raises a ValidationError @@ -40,6 +42,14 @@ def to_python(self, value): """ return value + def to_database(self, value): + """ + Converts python value into database value + """ + if value is None and self.has_default: + return self.get_default() + return value + @property def has_default(self): return bool(self.default) @@ -48,6 +58,33 @@ def has_default(self): def is_primary_key(self): return self.primary_key + #methods for replacing column definitions with properties that interact + #with a column's value member + #this will allow putting logic behind value access (lazy loading, etc) + def _getval(self): + """ This columns value getter """ + return self.value + + def _setval(self, val): + """ This columns value setter """ + self.value = val + + def _delval(self): + """ This columns value deleter """ + raise NotImplementedError + + def get_property(self, allow_delete=True): + """ + Returns the property object that will set and get this + column's value and be assigned to this column's model attribute + """ + getval = lambda slf: self._getval() + setval = lambda slf, val: self._setval(val) + delval = lambda slf: self._delval() + if not allow_delete: + return property(getval, setval) + return property(getval, setval, delval) + def get_default(self): if self.has_default: if callable(self.default): @@ -55,14 +92,6 @@ def get_default(self): else: return self.default - def to_database(self, value): - """ - Converts python value into database value - """ - if value is None and self.has_default: - return self.get_default() - return value - def get_column_def(self): """ Returns a column 
definition for CQL table definition diff --git a/cassandraengine/models.py b/cassandraengine/models.py index 040682d4f8..3d1388f946 100644 --- a/cassandraengine/models.py +++ b/cassandraengine/models.py @@ -32,7 +32,7 @@ def find(cls, pk): @property def pk(self): - """ Returns the object's primary key, regardless of it's name """ + """ Returns the object's primary key """ return getattr(self, self._pk_name) #dynamic column methods @@ -78,26 +78,31 @@ def __new__(cls, name, bases, attrs): #and set default column names _columns = {} pk_name = None + + def _transform_column(col_name, col_obj): + _columns[col_name] = col_obj + col_obj.set_db_name(col_name) + allow_delete = not col_obj.primary_key + attrs[col_name] = col_obj.get_property(allow_delete=allow_delete) + + #transform column definitions for k,v in attrs.items(): if isinstance(v, columns.BaseColumn): if v.is_primary_key: if pk_name: raise ModelException("More than one primary key defined for {}".format(name)) pk_name = k - _columns[k] = attrs.pop(k) - _columns[k].set_db_name(k) + _transform_column(k,v) #set primary key if it's not already defined if not pk_name: - _columns['id'] = columns.UUID(primary_key=True) - _columns['id'].set_db_name('id') - pk_name = 'id' + k,v = 'id', columns.UUID(primary_key=True) + _transform_column(k,v) + pk_name = k - #setup pk shortcut + #setup primary key shortcut if pk_name != 'pk': - pk_get = lambda self: getattr(self, pk_name) - pk_set = lambda self, val: setattr(self, pk_name, val) - attrs['pk'] = property(pk_get, pk_set) + attrs['pk'] = _columns[pk_name].get_property(allow_delete=False) #check for duplicate column names col_names = set() @@ -107,7 +112,7 @@ def __new__(cls, name, bases, attrs): col_names.add(v.db_field) #get column family name - cf_name = attrs.pop('db_name', None) or name + cf_name = attrs.pop('db_name', name) #create db_name -> model name map for loading db_map = {} diff --git a/cassandraengine/tests/model/test_class_construction.py 
b/cassandraengine/tests/model/test_class_construction.py index cc24a8c08b..5e3dbe1968 100644 --- a/cassandraengine/tests/model/test_class_construction.py +++ b/cassandraengine/tests/model/test_class_construction.py @@ -19,8 +19,8 @@ class TestModel(Model): text = columns.Text() self.assertHasAttr(TestModel, '_columns') - self.assertNotHasAttr(TestModel, 'id') - self.assertNotHasAttr(TestModel, 'text') + self.assertHasAttr(TestModel, 'id') + self.assertHasAttr(TestModel, 'text') inst = TestModel() self.assertHasAttr(inst, 'id') From 83cf23da7c9f0dfc3bc36884c69ab9e334ea8d12 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 11 Nov 2012 22:14:40 -0800 Subject: [PATCH 0014/3726] cleaning things up a bit --- README.md | 9 ++++----- cassandraengine/columns.py | 5 +++++ cassandraengine/manager.py | 8 ++------ cassandraengine/models.py | 2 ++ cassandraengine/query.py | 11 +++++++++++ 5 files changed, 24 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 136523416c..b3349e0d80 100644 --- a/README.md +++ b/README.md @@ -1,22 +1,21 @@ cassandraengine =============== -Python Cassandra ORM in the style of django / mongoengine +Cassandra ORM for Python in the style of the Django orm and mongoengine In it's current state you can define column families, create and delete column families based on your model definiteions, save models and retrieve models by their primary keys. -That's about it. Also, the CQL stuff is pretty simple at this point. +That's about it. Also, the CQL stuff is very basic at this point. ##TODO -* Complex queries (class Q(object)) -* Match column names to mongoengine field names? +* Real querying * mongoengine fields? URLField, EmbeddedDocument, ListField, DictField * column ttl? * ForeignKey/DBRef fields? * dynamic column support -* tests * query functionality +* Match column names to mongoengine field names? 
* nice column and model class __repr__ diff --git a/cassandraengine/columns.py b/cassandraengine/columns.py index 20f54ba8d7..22a533c6d4 100644 --- a/cassandraengine/columns.py +++ b/cassandraengine/columns.py @@ -197,11 +197,16 @@ def __init__(self, **kwargs): #TODO: research supercolumns #http://wiki.apache.org/cassandra/DataModel class List(BaseColumn): + #checkout cql.cqltypes.ListType def __init__(self, **kwargs): super(DateTime, self).__init__(**kwargs) raise NotImplementedError class Dict(BaseColumn): + #checkout cql.cqltypes.MapType def __init__(self, **kwargs): super(DateTime, self).__init__(**kwargs) raise NotImplementedError + +#checkout cql.cqltypes.SetType +#checkout cql.cqltypes.CompositeType diff --git a/cassandraengine/manager.py b/cassandraengine/manager.py index 3f7ec7f66e..de061ce0fb 100644 --- a/cassandraengine/manager.py +++ b/cassandraengine/manager.py @@ -23,9 +23,7 @@ def column_family_name(self): return cf_name def __call__(self, **kwargs): - """ - filter shortcut - """ + """ filter shortcut """ return self.filter(**kwargs) def find(self, pk): @@ -71,9 +69,7 @@ def _save_instance(self, instance): QuerySet(self.model).save(instance) def _delete_instance(self, instance): - """ - Deletes a single instance - """ + """ Deletes a single instance """ QuerySet(self.model).delete_instance(instance) #----column family create/delete---- diff --git a/cassandraengine/models.py b/cassandraengine/models.py index 3d1388f946..fdc965581d 100644 --- a/cassandraengine/models.py +++ b/cassandraengine/models.py @@ -119,11 +119,13 @@ def _transform_column(col_name, col_obj): for name, col in _columns.items(): db_map[col.db_field] = name + #add management members to the class attrs['_columns'] = _columns attrs['_db_map'] = db_map attrs['_pk_name'] = pk_name attrs['_dynamic_columns'] = {} + #create the class and add a manager to it klass = super(ModelMetaClass, cls).__new__(cls, name, bases, attrs) klass.objects = Manager(klass) return klass diff --git 
a/cassandraengine/query.py b/cassandraengine/query.py index 025d135ab6..817b03960e 100644 --- a/cassandraengine/query.py +++ b/cassandraengine/query.py @@ -2,11 +2,22 @@ from cassandraengine.connection import get_connection +#CQL 3 reference: +#http://www.datastax.com/docs/1.1/references/cql/index + +class Query(object): + + pass + class QuerySet(object): #TODO: querysets should be immutable #TODO: querysets should be executed lazily #TODO: conflicting filter args should raise exception unless a force kwarg is supplied + #CQL supports ==, >, >=, <, <=, IN (a,b,c,..n) + #REVERSE, LIMIT + #ORDER BY + def __init__(self, model, query_args={}): super(QuerySet, self).__init__() self.model = model From b8103dd33bd29c83572cdf013759a865bf227437 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 12 Nov 2012 21:40:34 -0800 Subject: [PATCH 0015/3726] removed restriction on multiple primary keys reworked metaclass to preserve column definition order adjusted primary key declaration in table definition cql removed dynamic columns --- README.md | 3 -- cassandraengine/columns.py | 12 +++++- cassandraengine/models.py | 39 +++++++------------ cassandraengine/query.py | 8 ++-- .../tests/model/test_class_construction.py | 19 +++++---- cassandraengine/tests/model/test_model_io.py | 18 --------- 6 files changed, 41 insertions(+), 58 deletions(-) diff --git a/README.md b/README.md index b3349e0d80..cc2d2fc976 100644 --- a/README.md +++ b/README.md @@ -9,11 +9,8 @@ based on your model definiteions, save models and retrieve models by their prima That's about it. Also, the CQL stuff is very basic at this point. ##TODO -* Real querying -* mongoengine fields? URLField, EmbeddedDocument, ListField, DictField * column ttl? * ForeignKey/DBRef fields? -* dynamic column support * query functionality * Match column names to mongoengine field names? 
* nice column and model class __repr__ diff --git a/cassandraengine/columns.py b/cassandraengine/columns.py index 22a533c6d4..bcbf39e7f5 100644 --- a/cassandraengine/columns.py +++ b/cassandraengine/columns.py @@ -9,6 +9,8 @@ class BaseColumn(object): #the cassandra type this column maps to db_type = None + instance_counter = 0 + def __init__(self, primary_key=False, db_field=None, default=None, null=False): """ :param primary_key: bool flag, there can be only one primary key per doc @@ -23,6 +25,10 @@ def __init__(self, primary_key=False, db_field=None, default=None, null=False): self.value = None + #keep track of instantiation order + self.position = BaseColumn.instance_counter + BaseColumn.instance_counter += 1 + def validate(self, value): """ Returns a cleaned and validated value. Raises a ValidationError @@ -97,8 +103,8 @@ def get_column_def(self): Returns a column definition for CQL table definition """ dterms = [self.db_field, self.db_type] - if self.primary_key: - dterms.append('PRIMARY KEY') + #if self.primary_key: + #dterms.append('PRIMARY KEY') return ' '.join(dterms) def set_db_name(self, name): @@ -196,6 +202,8 @@ def __init__(self, **kwargs): #TODO: research supercolumns #http://wiki.apache.org/cassandra/DataModel +#checkout composite columns: +#http://www.datastax.com/dev/blog/introduction-to-composite-columns-part-1 class List(BaseColumn): #checkout cql.cqltypes.ListType def __init__(self, **kwargs): diff --git a/cassandraengine/models.py b/cassandraengine/models.py index fdc965581d..76f3d99cd6 100644 --- a/cassandraengine/models.py +++ b/cassandraengine/models.py @@ -1,3 +1,4 @@ +from collections import OrderedDict from cassandraengine import columns from cassandraengine.exceptions import ModelException @@ -17,8 +18,6 @@ def __init__(self, **values): for k,v in values.items(): if k in self._columns: setattr(self, k, v) - else: - self._dynamic_columns[k] = v #set excluded columns to None for k in self._columns.keys(): @@ -35,16 +34,6 @@ def 
pk(self): """ Returns the object's primary key """ return getattr(self, self._pk_name) - #dynamic column methods - def __getitem__(self, key): - return self._dynamic_columns[key] - - def __setitem__(self, key, val): - self._dynamic_columns[key] = val - - def __delitem__(self, key): - del self._dynamic_columns[key] - def validate(self): """ Cleans and validates the field values """ for name, col in self._columns.items(): @@ -76,7 +65,7 @@ def __new__(cls, name, bases, attrs): """ #move column definitions into _columns dict #and set default column names - _columns = {} + _columns = OrderedDict() pk_name = None def _transform_column(col_name, col_obj): @@ -85,20 +74,20 @@ def _transform_column(col_name, col_obj): allow_delete = not col_obj.primary_key attrs[col_name] = col_obj.get_property(allow_delete=allow_delete) - #transform column definitions - for k,v in attrs.items(): - if isinstance(v, columns.BaseColumn): - if v.is_primary_key: - if pk_name: - raise ModelException("More than one primary key defined for {}".format(name)) - pk_name = k - _transform_column(k,v) - - #set primary key if it's not already defined - if not pk_name: + #import ipdb; ipdb.set_trace() + column_definitions = [(k,v) for k,v in attrs.items() if isinstance(v, columns.BaseColumn)] + column_definitions = sorted(column_definitions, lambda x,y: cmp(x[1].position, y[1].position)) + + #prepend primary key if none has been defined + if not any([v.primary_key for k,v in column_definitions]): k,v = 'id', columns.UUID(primary_key=True) + column_definitions = [(k,v)] + column_definitions + + #transform column definitions + for k,v in column_definitions: + if pk_name is None and v.primary_key: + pk_name = k _transform_column(k,v) - pk_name = k #setup primary key shortcut if pk_name != 'pk': diff --git a/cassandraengine/query.py b/cassandraengine/query.py index 817b03960e..8cd7328519 100644 --- a/cassandraengine/query.py +++ b/cassandraengine/query.py @@ -158,16 +158,18 @@ def 
_create_column_family(self): qs = ['CREATE TABLE {}'.format(self.column_family_name)] #add column types + pkeys = [] qtypes = [] def add_column(col): s = '{} {}'.format(col.db_field, col.db_type) - if col.primary_key: s += ' PRIMARY KEY' + if col.primary_key: pkeys.append(col.db_field) qtypes.append(s) - add_column(self.model._columns[self.model._pk_name]) + #add_column(self.model._columns[self.model._pk_name]) for name, col in self.model._columns.items(): - if col.primary_key: continue add_column(col) + qtypes.append('PRIMARY KEY ({})'.format(', '.join(pkeys))) + qs += ['({})'.format(', '.join(qtypes))] qs = ' '.join(qs) diff --git a/cassandraengine/tests/model/test_class_construction.py b/cassandraengine/tests/model/test_class_construction.py index 5e3dbe1968..7da6f1e92a 100644 --- a/cassandraengine/tests/model/test_class_construction.py +++ b/cassandraengine/tests/model/test_class_construction.py @@ -28,13 +28,6 @@ class TestModel(Model): self.assertIsNone(inst.id) self.assertIsNone(inst.text) - def test_multiple_primary_keys_fail(self): - """Test attempting to define multiple primary keys fails""" - with self.assertRaises(ModelException): - class MultiPK(Model): - id1 = columns.Integer(primary_key=True) - id2 = columns.Integer(primary_key=True) - def test_db_map(self): """ Tests that the db_map is properly defined @@ -58,3 +51,15 @@ class BadNames(Model): words = columns.Text() content = columns.Text(db_field='words') + def test_column_ordering_is_preserved(self): + """ + Tests that the _columns dics retains the ordering of the class definition + """ + + class Stuff(Model): + words = columns.Text() + content = columns.Text() + numbers = columns.Integer() + + self.assertEquals(Stuff._columns.keys(), ['id', 'words', 'content', 'numbers']) + diff --git a/cassandraengine/tests/model/test_model_io.py b/cassandraengine/tests/model/test_model_io.py index 8adf46913c..788b36bb92 100644 --- a/cassandraengine/tests/model/test_model_io.py +++ 
b/cassandraengine/tests/model/test_model_io.py @@ -52,21 +52,3 @@ def test_nullable_columns_are_saved_properly(self): Tests that nullable columns save without any trouble """ - @skip - def test_dynamic_columns(self): - """ - Tests that items put into dynamic columns are saved and retrieved properly - - Note: seems I've misunderstood how arbitrary column names work in Cassandra - skipping for now - """ - #TODO:Fix this - tm = TestModel(count=8, text='123456789') - tm['other'] = 'something' - tm['number'] = 5 - tm.save() - - tm2 = TestModel.objects.find(tm.pk) - self.assertEquals(tm['other'], tm2['other']) - self.assertEquals(tm['number'], tm2['number']) - From fc1de7259604dbb973903fd58fba332790ad9c43 Mon Sep 17 00:00:00 2001 From: Eric Scrivner Date: Mon, 12 Nov 2012 22:04:12 -0800 Subject: [PATCH 0016/3726] Add Vagrantfile - Add complete virtual environment with Python 2.7 and Cassandra 1.1.6 for testing. --- Vagrantfile | 46 ++++++++++++++++ manifests/default.pp | 66 +++++++++++++++++++++++ modules/cassandra/files/cassandra.upstart | 18 +++++++ 3 files changed, 130 insertions(+) create mode 100644 Vagrantfile create mode 100644 manifests/default.pp create mode 100644 modules/cassandra/files/cassandra.upstart diff --git a/Vagrantfile b/Vagrantfile new file mode 100644 index 0000000000..6d8ccc8ad1 --- /dev/null +++ b/Vagrantfile @@ -0,0 +1,46 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +Vagrant::Config.run do |config| + # All Vagrant configuration is done here. The most common configuration + # options are documented and commented below. For a complete reference, + # please see the online documentation at vagrantup.com. + + # Every Vagrant virtual environment requires a box to build off of. + config.vm.box = "precise" + + # The url from where the 'config.vm.box' box will be fetched if it + # doesn't already exist on the user's system. 
+ config.vm.box_url = "https://s3-us-west-2.amazonaws.com/graphplatform.swmirror/precise64.box" + + # Boot with a GUI so you can see the screen. (Default is headless) + # config.vm.boot_mode = :gui + + # Assign this VM to a host-only network IP, allowing you to access it + # via the IP. Host-only networks can talk to the host machine as well as + # any other machines on the same network, but cannot be accessed (through this + # network interface) by any external networks. + config.vm.network :hostonly, "192.168.33.10" + + config.vm.customize ["modifyvm", :id, "--memory", "2048"] + + # Forward a port from the guest to the host, which allows for outside + # computers to access the VM, whereas host only networking does not. + config.vm.forward_port 80, 8080 + + # Share an additional folder to the guest VM. The first argument is + # an identifier, the second is the path on the guest to mount the + # folder, and the third is the path on the host to the actual folder. + # config.vm.share_folder "v-data", "/vagrant_data", "../data" + #config.vm.share_folder "v-root" "/vagrant", ".", :nfs => true + + # Provision with puppet + config.vm.provision :shell, :inline => "apt-get update" + + config.vm.provision :puppet, :options => ['--verbose', '--debug'] do |puppet| + puppet.facter = {'hostname' => 'cassandraengine'} + # puppet.manifests_path = "puppet/manifests" + # puppet.manifest_file = "site.pp" + puppet.module_path = "modules" + end +end \ No newline at end of file diff --git a/manifests/default.pp b/manifests/default.pp new file mode 100644 index 0000000000..1cf748cb71 --- /dev/null +++ b/manifests/default.pp @@ -0,0 +1,66 @@ +# Basic virtualbox configuration +Exec { path => "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" } + +node basenode { + package{["build-essential", "git-core", "vim"]: + ensure => installed + } +} + +class xfstools { + package{['lvm2', 'xfsprogs']: + ensure => installed + } +} +class java { + package {['openjdk-7-jre-headless']: + 
ensure => installed + } +} + +class cassandra { + include xfstools + include java + + package {"wget": + ensure => latest + } + + file {"/etc/init/cassandra.conf": + source => "puppet:///modules/cassandra/cassandra.upstart", + owner => root + } + + exec {"download-cassandra": + cwd => "/tmp", + command => "wget http://download.nextag.com/apache/cassandra/1.1.6/apache-cassandra-1.1.6-bin.tar.gz", + creates => "/tmp/apache-cassandra-1.1.6-bin.tar.gz", + require => [Package["wget"], File["/etc/init/cassandra.conf"]] + } + + exec {"install-cassandra": + cwd => "/tmp", + command => "tar -xzf apache-cassandra-1.1.6-bin.tar.gz; mv apache-cassandra-1.1.6 /usr/local/cassandra", + require => Exec["download-cassandra"], + creates => "/usr/local/cassandra/bin/cassandra" + } + + service {"cassandra": + ensure => running, + require => Exec["install-cassandra"] + } +} + +node cassandraengine inherits basenode { + include cassandra + + package {["python-pip", "python-dev"]: + ensure => installed + } + + exec {"install-requirements": + cwd => "/vagrant", + command => "pip install -r requirements.txt", + require => [Package["python-pip"], Package["python-dev"]] + } +} diff --git a/modules/cassandra/files/cassandra.upstart b/modules/cassandra/files/cassandra.upstart new file mode 100644 index 0000000000..05278cad04 --- /dev/null +++ b/modules/cassandra/files/cassandra.upstart @@ -0,0 +1,18 @@ +# Cassandra Upstart +# + +description "Cassandra" + +start on runlevel [2345] +stop on runlevel [!2345] + +respawn + +exec /usr/local/cassandra/bin/cassandra -f + + + +pre-stop script + kill `cat /tmp/cassandra.pid` + sleep 3 +end script \ No newline at end of file From fd881636ea01d58673d12389fe98df925bad6eb1 Mon Sep 17 00:00:00 2001 From: Eric Scrivner Date: Mon, 12 Nov 2012 22:04:41 -0800 Subject: [PATCH 0017/3726] Add Ignore For Vagrant - Add ignore for .vagrant file --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 
f24cd9952d..878f8ebd3c 100644 --- a/.gitignore +++ b/.gitignore @@ -12,7 +12,7 @@ var sdist develop-eggs .installed.cfg - +.vagrant # Installer logs pip-log.txt From 718003362102c078d608afbc6ccb3fb740d79e0b Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 13 Nov 2012 21:17:21 -0800 Subject: [PATCH 0018/3726] fixed a problem with the value management of instances --- cassandraengine/models.py | 7 +++--- .../tests/model/test_class_construction.py | 22 ++++++++++++++++++- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/cassandraengine/models.py b/cassandraengine/models.py index 76f3d99cd6..2b425bbc8e 100644 --- a/cassandraengine/models.py +++ b/cassandraengine/models.py @@ -71,10 +71,7 @@ def __new__(cls, name, bases, attrs): def _transform_column(col_name, col_obj): _columns[col_name] = col_obj col_obj.set_db_name(col_name) - allow_delete = not col_obj.primary_key - attrs[col_name] = col_obj.get_property(allow_delete=allow_delete) - #import ipdb; ipdb.set_trace() column_definitions = [(k,v) for k,v in attrs.items() if isinstance(v, columns.BaseColumn)] column_definitions = sorted(column_definitions, lambda x,y: cmp(x[1].position, y[1].position)) @@ -91,7 +88,9 @@ def _transform_column(col_name, col_obj): #setup primary key shortcut if pk_name != 'pk': - attrs['pk'] = _columns[pk_name].get_property(allow_delete=False) + pk_get = lambda self: getattr(self, pk_name) + pk_set = lambda self, val: setattr(self, pk_name, val) + attrs['pk'] = property(pk_get, pk_set) #check for duplicate column names col_names = set() diff --git a/cassandraengine/tests/model/test_class_construction.py b/cassandraengine/tests/model/test_class_construction.py index 7da6f1e92a..b1659fa122 100644 --- a/cassandraengine/tests/model/test_class_construction.py +++ b/cassandraengine/tests/model/test_class_construction.py @@ -18,10 +18,12 @@ def test_column_attributes_handled_correctly(self): class TestModel(Model): text = columns.Text() + #check class attributes 
self.assertHasAttr(TestModel, '_columns') - self.assertHasAttr(TestModel, 'id') + self.assertNotHasAttr(TestModel, 'id') self.assertHasAttr(TestModel, 'text') + #check instance attributes inst = TestModel() self.assertHasAttr(inst, 'id') self.assertHasAttr(inst, 'text') @@ -63,3 +65,21 @@ class Stuff(Model): self.assertEquals(Stuff._columns.keys(), ['id', 'words', 'content', 'numbers']) + def test_value_managers_are_keeping_model_instances_isolated(self): + """ + Tests that instance value managers are isolated from other instances + """ + class Stuff(Model): + num = columns.Integer() + + inst1 = Stuff(num=5) + inst2 = Stuff(num=7) + + self.assertNotEquals(inst1.num, inst2.num) + self.assertEquals(inst1.num, 5) + self.assertEquals(inst2.num, 7) + + def test_meta_data_is_not_inherited(self): + """ + Test that metadata defined in one class, is not inherited by subclasses + """ From eab79f6900c3d6bb69702e04c58bb535519c2640 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 18 Nov 2012 17:34:35 -0800 Subject: [PATCH 0019/3726] changing name to cqlengine --- cassandraengine/__init__.py | 3 -- .../tests/columns/test_validation.py | 16 -------- cqlengine/__init__.py | 3 ++ {cassandraengine => cqlengine}/columns.py | 37 ++++++++++++++++++- {cassandraengine => cqlengine}/connection.py | 0 {cassandraengine => cqlengine}/exceptions.py | 2 +- {cassandraengine => cqlengine}/manager.py | 2 +- {cassandraengine => cqlengine}/models.py | 22 +++++------ {cassandraengine => cqlengine}/query.py | 2 +- .../tests/__init__.py | 0 {cassandraengine => cqlengine}/tests/base.py | 0 .../tests/columns/__init__.py | 0 cqlengine/tests/columns/test_validation.py | 16 ++++++++ .../tests/model/__init__.py | 0 .../tests/model/test_class_construction.py | 8 ++-- .../tests/model/test_model_io.py | 6 +-- .../tests/model/test_validation.py | 0 17 files changed, 74 insertions(+), 43 deletions(-) delete mode 100644 cassandraengine/__init__.py delete mode 100644 
cassandraengine/tests/columns/test_validation.py create mode 100644 cqlengine/__init__.py rename {cassandraengine => cqlengine}/columns.py (87%) rename {cassandraengine => cqlengine}/connection.py (100%) rename {cassandraengine => cqlengine}/exceptions.py (75%) rename {cassandraengine => cqlengine}/manager.py (98%) rename {cassandraengine => cqlengine}/models.py (89%) rename {cassandraengine => cqlengine}/query.py (99%) rename {cassandraengine => cqlengine}/tests/__init__.py (100%) rename {cassandraengine => cqlengine}/tests/base.py (100%) rename {cassandraengine => cqlengine}/tests/columns/__init__.py (100%) create mode 100644 cqlengine/tests/columns/test_validation.py rename {cassandraengine => cqlengine}/tests/model/__init__.py (100%) rename {cassandraengine => cqlengine}/tests/model/test_class_construction.py (93%) rename {cassandraengine => cqlengine}/tests/model/test_model_io.py (91%) rename {cassandraengine => cqlengine}/tests/model/test_validation.py (100%) diff --git a/cassandraengine/__init__.py b/cassandraengine/__init__.py deleted file mode 100644 index af95d0b003..0000000000 --- a/cassandraengine/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from cassandraengine.columns import * -from cassandraengine.models import Model - diff --git a/cassandraengine/tests/columns/test_validation.py b/cassandraengine/tests/columns/test_validation.py deleted file mode 100644 index e42519cc3b..0000000000 --- a/cassandraengine/tests/columns/test_validation.py +++ /dev/null @@ -1,16 +0,0 @@ -#tests the behavior of the column classes - -from cassandraengine.tests.base import BaseCassEngTestCase - -from cassandraengine.columns import BaseColumn -from cassandraengine.columns import Bytes -from cassandraengine.columns import Ascii -from cassandraengine.columns import Text -from cassandraengine.columns import Integer -from cassandraengine.columns import DateTime -from cassandraengine.columns import UUID -from cassandraengine.columns import Boolean -from cassandraengine.columns 
import Float -from cassandraengine.columns import Decimal - - diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py new file mode 100644 index 0000000000..1af8944a10 --- /dev/null +++ b/cqlengine/__init__.py @@ -0,0 +1,3 @@ +from cqlengine.columns import * +from cqlengine.models import Model + diff --git a/cassandraengine/columns.py b/cqlengine/columns.py similarity index 87% rename from cassandraengine/columns.py rename to cqlengine/columns.py index bcbf39e7f5..25a6f59b34 100644 --- a/cassandraengine/columns.py +++ b/cqlengine/columns.py @@ -2,12 +2,43 @@ import re from uuid import uuid1, uuid4 -from cassandraengine.exceptions import ValidationError +from cqlengine.exceptions import ValidationError + +class BaseValueManager(object): + + def __init__(self, instance, column, value): + self.instance = instance + self.column = column + self.initial_value = value + self.value = value + + def deleted(self): + return self.value is None and self.initial_value is not None + + def getval(self): + return self.value + + def setval(self, val): + self.value = val + + def delval(self): + self.value = None + + def get_property(self): + _get = lambda slf: self.getval() + _set = lambda slf, val: self.setval(val) + _del = lambda slf: self.delval() + + if self.column.can_delete: + return property(_get, _set, _del) + else: + return property(_get, _set) class BaseColumn(object): #the cassandra type this column maps to db_type = None + value_manager = BaseValueManager instance_counter = 0 @@ -64,6 +95,10 @@ def has_default(self): def is_primary_key(self): return self.primary_key + @property + def can_delete(self): + return not self.primary_key + #methods for replacing column definitions with properties that interact #with a column's value member #this will allow putting logic behind value access (lazy loading, etc) diff --git a/cassandraengine/connection.py b/cqlengine/connection.py similarity index 100% rename from cassandraengine/connection.py rename to cqlengine/connection.py 
diff --git a/cassandraengine/exceptions.py b/cqlengine/exceptions.py similarity index 75% rename from cassandraengine/exceptions.py rename to cqlengine/exceptions.py index 4b20f7a334..3bb026f214 100644 --- a/cassandraengine/exceptions.py +++ b/cqlengine/exceptions.py @@ -1,4 +1,4 @@ -#cassandraengine exceptions +#cqlengine exceptions class ModelException(BaseException): pass class ValidationError(BaseException): pass diff --git a/cassandraengine/manager.py b/cqlengine/manager.py similarity index 98% rename from cassandraengine/manager.py rename to cqlengine/manager.py index de061ce0fb..4d4cbb73bc 100644 --- a/cassandraengine/manager.py +++ b/cqlengine/manager.py @@ -1,6 +1,6 @@ #manager class -from cassandraengine.query import QuerySet +from cqlengine.query import QuerySet class Manager(object): diff --git a/cassandraengine/models.py b/cqlengine/models.py similarity index 89% rename from cassandraengine/models.py rename to cqlengine/models.py index 2b425bbc8e..b44cdebe76 100644 --- a/cassandraengine/models.py +++ b/cqlengine/models.py @@ -1,8 +1,8 @@ from collections import OrderedDict -from cassandraengine import columns -from cassandraengine.exceptions import ModelException -from cassandraengine.manager import Manager +from cqlengine import columns +from cqlengine.exceptions import ModelException +from cqlengine.manager import Manager class BaseModel(object): """ @@ -11,18 +11,14 @@ class BaseModel(object): #table names will be generated automatically from it's model name and package #however, you can alse define them manually here - db_name = None + db_name = None def __init__(self, **values): - #set columns from values - for k,v in values.items(): - if k in self._columns: - setattr(self, k, v) - - #set excluded columns to None - for k in self._columns.keys(): - if k not in values: - setattr(self, k, None) + self._values = {} + for name, column in self._columns.items(): + value_mngr = column.value_manager(self, column, values.get(name, None)) + 
self._values[name] = value_mngr + setattr(self, name, value_mngr.get_property()) @classmethod def find(cls, pk): diff --git a/cassandraengine/query.py b/cqlengine/query.py similarity index 99% rename from cassandraengine/query.py rename to cqlengine/query.py index 8cd7328519..70343d6bce 100644 --- a/cassandraengine/query.py +++ b/cqlengine/query.py @@ -1,6 +1,6 @@ import copy -from cassandraengine.connection import get_connection +from cqlengine.connection import get_connection #CQL 3 reference: #http://www.datastax.com/docs/1.1/references/cql/index diff --git a/cassandraengine/tests/__init__.py b/cqlengine/tests/__init__.py similarity index 100% rename from cassandraengine/tests/__init__.py rename to cqlengine/tests/__init__.py diff --git a/cassandraengine/tests/base.py b/cqlengine/tests/base.py similarity index 100% rename from cassandraengine/tests/base.py rename to cqlengine/tests/base.py diff --git a/cassandraengine/tests/columns/__init__.py b/cqlengine/tests/columns/__init__.py similarity index 100% rename from cassandraengine/tests/columns/__init__.py rename to cqlengine/tests/columns/__init__.py diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py new file mode 100644 index 0000000000..f916fb0ebe --- /dev/null +++ b/cqlengine/tests/columns/test_validation.py @@ -0,0 +1,16 @@ +#tests the behavior of the column classes + +from cqlengine.tests.base import BaseCassEngTestCase + +from cqlengine.columns import BaseColumn +from cqlengine.columns import Bytes +from cqlengine.columns import Ascii +from cqlengine.columns import Text +from cqlengine.columns import Integer +from cqlengine.columns import DateTime +from cqlengine.columns import UUID +from cqlengine.columns import Boolean +from cqlengine.columns import Float +from cqlengine.columns import Decimal + + diff --git a/cassandraengine/tests/model/__init__.py b/cqlengine/tests/model/__init__.py similarity index 100% rename from 
cassandraengine/tests/model/__init__.py rename to cqlengine/tests/model/__init__.py diff --git a/cassandraengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py similarity index 93% rename from cassandraengine/tests/model/test_class_construction.py rename to cqlengine/tests/model/test_class_construction.py index b1659fa122..bf6f3522d7 100644 --- a/cassandraengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -1,8 +1,8 @@ -from cassandraengine.tests.base import BaseCassEngTestCase +from cqlengine.tests.base import BaseCassEngTestCase -from cassandraengine.exceptions import ModelException -from cassandraengine.models import Model -from cassandraengine import columns +from cqlengine.exceptions import ModelException +from cqlengine.models import Model +from cqlengine import columns class TestModelClassFunction(BaseCassEngTestCase): """ diff --git a/cassandraengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py similarity index 91% rename from cassandraengine/tests/model/test_model_io.py rename to cqlengine/tests/model/test_model_io.py index 788b36bb92..a080726ba6 100644 --- a/cassandraengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -1,8 +1,8 @@ from unittest import skip -from cassandraengine.tests.base import BaseCassEngTestCase +from cqlengine.tests.base import BaseCassEngTestCase -from cassandraengine.models import Model -from cassandraengine import columns +from cqlengine.models import Model +from cqlengine import columns class TestModel(Model): count = columns.Integer() diff --git a/cassandraengine/tests/model/test_validation.py b/cqlengine/tests/model/test_validation.py similarity index 100% rename from cassandraengine/tests/model/test_validation.py rename to cqlengine/tests/model/test_validation.py From 39176b9660b0c722ad9f0a97e6a1711294455efa Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 18 Nov 2012 
17:48:21 -0800 Subject: [PATCH 0020/3726] adding valuemanager class and setup to model metaclass --- cqlengine/models.py | 14 ++++++++++---- cqlengine/tests/model/test_class_construction.py | 2 +- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index b44cdebe76..3fa7c6bfcd 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -18,7 +18,6 @@ def __init__(self, **values): for name, column in self._columns.items(): value_mngr = column.value_manager(self, column, values.get(name, None)) self._values[name] = value_mngr - setattr(self, name, value_mngr.get_property()) @classmethod def find(cls, pk): @@ -67,6 +66,15 @@ def __new__(cls, name, bases, attrs): def _transform_column(col_name, col_obj): _columns[col_name] = col_obj col_obj.set_db_name(col_name) + #set properties + _get = lambda self: self._values[col_name].getval() + _set = lambda self, val: self._values[col_name].setval(val) + _del = lambda self: self._values[col_name].delval() + if col_obj.can_delete: + attrs[col_name] = property(_get, _set) + else: + attrs[col_name] = property(_get, _set, _del) + column_definitions = [(k,v) for k,v in attrs.items() if isinstance(v, columns.BaseColumn)] column_definitions = sorted(column_definitions, lambda x,y: cmp(x[1].position, y[1].position)) @@ -84,9 +92,7 @@ def _transform_column(col_name, col_obj): #setup primary key shortcut if pk_name != 'pk': - pk_get = lambda self: getattr(self, pk_name) - pk_set = lambda self, val: setattr(self, pk_name, val) - attrs['pk'] = property(pk_get, pk_set) + attrs['pk'] = attrs[pk_name] #check for duplicate column names col_names = set() diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index bf6f3522d7..949e40fa77 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -20,7 +20,7 @@ class TestModel(Model): #check class attributes 
self.assertHasAttr(TestModel, '_columns') - self.assertNotHasAttr(TestModel, 'id') + self.assertHasAttr(TestModel, 'id') self.assertHasAttr(TestModel, 'text') #check instance attributes From 4152a37e80cc2fcad18af5d1f7f7672a258bf935 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 20 Nov 2012 21:48:41 -0800 Subject: [PATCH 0021/3726] adding nosetests to vagrant node --- manifests/default.pp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manifests/default.pp b/manifests/default.pp index 1cf748cb71..3c5ad95642 100644 --- a/manifests/default.pp +++ b/manifests/default.pp @@ -54,7 +54,7 @@ node cassandraengine inherits basenode { include cassandra - package {["python-pip", "python-dev"]: + package {["python-pip", "python-dev", "python-nose"]: ensure => installed } From 61177eb3401ab151ed3072368f8cfb29e289b436 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 20 Nov 2012 23:22:58 -0800 Subject: [PATCH 0022/3726] starting to write the QuerySet class --- cqlengine/exceptions.py | 6 +- cqlengine/query.py | 134 ++++++++++++++++++++++--- cqlengine/tests/query/__init__.py | 0 cqlengine/tests/query/test_queryset.py | 52 ++++++++++ 4 files changed, 177 insertions(+), 15 deletions(-) create mode 100644 cqlengine/tests/query/__init__.py create mode 100644 cqlengine/tests/query/test_queryset.py diff --git a/cqlengine/exceptions.py b/cqlengine/exceptions.py index 3bb026f214..3a5444e9b1 100644 --- a/cqlengine/exceptions.py +++ b/cqlengine/exceptions.py @@ -1,4 +1,6 @@ #cqlengine exceptions -class ModelException(BaseException): pass -class ValidationError(BaseException): pass +class CQLEngineException(BaseException): pass +class ModelException(CQLEngineException): pass +class ValidationError(CQLEngineException): pass +class QueryException(CQLEngineException): pass diff --git a/cqlengine/query.py b/cqlengine/query.py index 70343d6bce..f410391097 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -1,18 +1,58 @@ +from collections import 
namedtuple import copy from cqlengine.connection import get_connection +from cqlengine.exceptions import QueryException #CQL 3 reference: #http://www.datastax.com/docs/1.1/references/cql/index -class Query(object): +WhereFilter = namedtuple('WhereFilter', ['column', 'operator', 'value']) - pass +class QueryOperatorException(QueryException): pass + +class QueryOperator(object): + symbol = None + + @classmethod + def get_operator(cls, symbol): + if not hasattr(cls, 'opmap'): + QueryOperator.opmap = {} + def _recurse(klass): + if klass.symbol: + QueryOperator.opmap[klass.symbol.upper()] = klass + for subklass in klass.__subclasses__(): + _recurse(subklass) + pass + _recurse(QueryOperator) + try: + return QueryOperator.opmap[symbol.upper()] + except KeyError: + raise QueryOperatorException("{} doesn't map to a QueryOperator".format(symbol)) + +class EqualsOperator(QueryOperator): + symbol = 'EQ' + +class InOperator(QueryOperator): + symbol = 'IN' + +class GreaterThanOperator(QueryOperator): + symbol = "GT" + +class GreaterThanOrEqualOperator(QueryOperator): + symbol = "GTE" + +class LessThanOperator(QueryOperator): + symbol = "LT" + +class LessThanOrEqualOperator(QueryOperator): + symbol = "LTE" class QuerySet(object): #TODO: querysets should be immutable #TODO: querysets should be executed lazily - #TODO: conflicting filter args should raise exception unless a force kwarg is supplied + #TODO: support specifying offset and limit (use slice) (maybe return a mutated queryset) + #TODO: support specifying columns to exclude or select only #CQL supports ==, >, >=, <, <=, IN (a,b,c,..n) #REVERSE, LIMIT @@ -21,9 +61,22 @@ class QuerySet(object): def __init__(self, model, query_args={}): super(QuerySet, self).__init__() self.model = model - self.query_args = query_args self.column_family_name = self.model.objects.column_family_name + #Where clause filters + self._where = [] + + #ordering arguments + self._order = [] + + #subset selection + self._limit = None + self._start = 
None + + #see the defer and only methods + self._defer_fields = [] + self._only_fields = [] + self._cursor = None #----query generation / execution---- @@ -31,7 +84,16 @@ def _execute_query(self): conn = get_connection() self._cursor = conn.cursor() - def _generate_querystring(self): + def _where_clause(self): + """ + Returns a where clause based on the given filter args + """ + pass + + def _select_query(self): + """ + Returns a select clause based on the given filter args + """ pass @property @@ -67,16 +129,36 @@ def first(self): pass def all(self): - return QuerySet(self.model) + clone = copy.deepcopy(self) + clone._where = [] + return clone + + def _parse_filter_arg(self, arg, val): + statement = arg.split('__') + if len(statement) == 1: + return WhereFilter(arg, None, val) + elif len(statement) == 2: + return WhereFilter(statement[0], statement[1], val) + else: + raise QueryException("Can't parse '{}'".format(arg)) def filter(self, **kwargs): - qargs = copy.deepcopy(self.query_args) - qargs.update(kwargs) - return QuerySet(self.model, query_args=qargs) + #add arguments to the where clause filters + clone = copy.deepcopy(self) + for arg, val in kwargs.items(): + raw_statement = self._parse_filter_arg(arg, val) + #resolve column and operator + try: + column = self.model._columns[raw_statement.column] + except KeyError: + raise QueryException("Can't resolve column name: '{}'".format(raw_statement.column)) - def exclude(self, **kwargs): - """ Need to invert the logic for all kwargs """ - pass + operator = QueryOperator.get_operator(raw_statement.operator) + + statement = WhereFilter(column, operator, val) + clone._where.append(statement) + + return clone def count(self): """ Returns the number of rows matched by this query """ @@ -95,6 +177,32 @@ def find(self, pk): self._cursor.execute(qs, {self.model._pk_name:pk}) return self._get_next() + def _only_or_defer(self, action, fields): + clone = copy.deepcopy(self) + if clone._defer_fields or clone._only_fields: + 
raise QueryException("QuerySet alread has only or defer fields defined") + + #check for strange fields + missing_fields = [f for f in fields if f not in self.model._columns.keys()] + if missing_fields: + raise QueryException("Can't resolve fields {} in {}".format(', '.join(missing_fields), self.model.__name__)) + + if action == 'defer': + clone._defer_fields = fields + elif action == 'only': + clone._only_fields = fields + else: + raise ValueError + + return clone + + def only(self, fields): + """ Load only these fields for the returned query """ + return self._only_or_defer('only', fields) + + def defer(self, fields): + """ Don't load these fields for the returned query """ + return self._only_or_defer('defer', fields) #----writes---- def save(self, instance): @@ -137,7 +245,7 @@ def save(self, instance): cur.execute(qs, field_values) #----delete--- - def delete(self): + def delete(self, columns=[]): """ Deletes the contents of a query """ diff --git a/cqlengine/tests/query/__init__.py b/cqlengine/tests/query/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py new file mode 100644 index 0000000000..ef1bd9c3f2 --- /dev/null +++ b/cqlengine/tests/query/test_queryset.py @@ -0,0 +1,52 @@ +from cqlengine.tests.base import BaseCassEngTestCase + +from cqlengine.exceptions import ModelException +from cqlengine.models import Model +from cqlengine import columns + +class TestQuerySet(BaseCassEngTestCase): + + def test_query_filter_parsing(self): + """ + Tests the queryset filter method + """ + + def test_where_clause_generation(self): + """ + Tests the where clause creation + """ + + def test_querystring_generation(self): + """ + Tests the select querystring creation + """ + + def test_queryset_is_immutable(self): + """ + Tests that calling a queryset function that changes it's state returns a new queryset + """ + + def test_queryset_slicing(self): + """ + Check that the 
limit and start is implemented as iterator slices + """ + + def test_proper_delete_behavior(self): + """ + Tests that deleting the contents of a queryset works properly + """ + + def test_the_all_method_clears_where_filter(self): + """ + Tests that calling all on a queryset with previously defined filters returns a queryset with no filters + """ + + def test_defining_only_and_defer_fails(self): + """ + Tests that trying to add fields to either only or defer, or doing so more than once fails + """ + + def test_defining_only_or_defer_fields_fails(self): + """ + Tests that setting only or defer fields that don't exist raises an exception + """ From 24fb184f711c22ce2147e6bfd680af4201206e7b Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 21 Nov 2012 07:39:13 -0800 Subject: [PATCH 0023/3726] removing some outdated property stuff --- cqlengine/columns.py | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 25a6f59b34..6ff219cfe9 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -99,33 +99,6 @@ def is_primary_key(self): def can_delete(self): return not self.primary_key - #methods for replacing column definitions with properties that interact - #with a column's value member - #this will allow putting logic behind value access (lazy loading, etc) - def _getval(self): - """ This columns value getter """ - return self.value - - def _setval(self, val): - """ This columns value setter """ - self.value = val - - def _delval(self): - """ This columns value deleter """ - raise NotImplementedError - - def get_property(self, allow_delete=True): - """ - Returns the property object that will set and get this - column's value and be assigned to this column's model attribute - """ - getval = lambda slf: self._getval() - setval = lambda slf, val: self._setval(val) - delval = lambda slf: self._delval() - if not allow_delete: - return property(getval, setval) - return property(getval, setval, 
delval) - def get_default(self): if self.has_default: if callable(self.default): From dee0ace2d76e0101c6d31180e1787ce25fd97821 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 21 Nov 2012 07:40:09 -0800 Subject: [PATCH 0024/3726] working out queryoperator logic and structure updating filter arg parsing and storage adding where clause creation --- cqlengine/query.py | 108 ++++++++++++++++++++++--- cqlengine/tests/query/test_queryset.py | 5 ++ 2 files changed, 100 insertions(+), 13 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index f410391097..56a27e6192 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -1,5 +1,7 @@ from collections import namedtuple import copy +from hashlib import md5 +from time import time from cqlengine.connection import get_connection from cqlengine.exceptions import QueryException @@ -7,13 +9,65 @@ #CQL 3 reference: #http://www.datastax.com/docs/1.1/references/cql/index -WhereFilter = namedtuple('WhereFilter', ['column', 'operator', 'value']) - class QueryOperatorException(QueryException): pass class QueryOperator(object): + # The symbol that identifies this operator in filter kwargs + # ie: colname__ symbol = None + # The comparator symbol this operator + # uses in cql + cql_symbol = None + + def __init__(self, column, value): + self.column = column + self.value = value + + #the identifier is a unique key that will be used in string + #replacement on query strings, it's created from a hash + #of this object's id and the time + self.identifier = md5(str(id(self)) + str(time())).hexdigest() + + #perform validation on this operator + self.validate_operator() + self.validate_value() + + @property + def cql(self): + """ + Returns this operator's portion of the WHERE clause + :param valname: the dict key that this operator's compare value will be found in + """ + return '{} {} :{}'.format(self.column.db_field, self.cql_symbol, self.identifier) + + def validate_operator(self): + """ + Checks that this operator 
can be used on the column provided + """ + if self.symbol is None: + raise QueryOperatorException("{} is not a valid operator, use one with 'symbol' defined".format(self.__class__.__name__)) + if self.cql_symbol is None: + raise QueryOperatorException("{} is not a valid operator, use one with 'cql_symbol' defined".format(self.__class__.__name__)) + + def validate_value(self): + """ + Checks that the compare value works with this operator + + *doesn't do anything by default + """ + pass + + def get_dict(self): + """ + Returns this operators contribution to the cql.query arg dictionanry + + ie: if this column's name is colname, and the identifier is colval, + this should return the dict: {'colval':} + SELECT * FROM column_family WHERE colname=:colval + """ + return {self.identifier: self.value} + @classmethod def get_operator(cls, symbol): if not hasattr(cls, 'opmap'): @@ -26,33 +80,40 @@ def _recurse(klass): pass _recurse(QueryOperator) try: - return QueryOperator.opmap[symbol.upper()] + return QueryOperator.opmap[symbol.upper()](column) except KeyError: raise QueryOperatorException("{} doesn't map to a QueryOperator".format(symbol)) class EqualsOperator(QueryOperator): symbol = 'EQ' + cql_symbol = '=' class InOperator(QueryOperator): symbol = 'IN' + cql_symbol = 'IN' class GreaterThanOperator(QueryOperator): symbol = "GT" + cql_symbol = '>' class GreaterThanOrEqualOperator(QueryOperator): symbol = "GTE" + cql_symbol = '>=' class LessThanOperator(QueryOperator): symbol = "LT" + cql_symbol = '<' class LessThanOrEqualOperator(QueryOperator): symbol = "LTE" + cql_symbol = '<=' class QuerySet(object): #TODO: querysets should be immutable #TODO: querysets should be executed lazily #TODO: support specifying offset and limit (use slice) (maybe return a mutated queryset) #TODO: support specifying columns to exclude or select only + #TODO: cache results in this instance, but don't copy them on deepcopy #CQL supports ==, >, >=, <, <=, IN (a,b,c,..n) #REVERSE, LIMIT @@ -84,11 
+145,26 @@ def _execute_query(self): conn = get_connection() self._cursor = conn.cursor() + def _validate_where_syntax(self): + """ + Checks that a filterset will not create invalid cql + """ + #TODO: check that there's either a = or IN relationship with a primary key or indexed field + def _where_clause(self): """ Returns a where clause based on the given filter args """ - pass + self._validate_where_syntax() + return ' AND '.join([f.cql for f in self._where]) + + def _where_values(self): + """ + Returns the value dict to be passed to the cql query + """ + values = {} + for where in self._where: values.update(where.get_dict()) + return values def _select_query(self): """ @@ -133,12 +209,17 @@ def all(self): clone._where = [] return clone - def _parse_filter_arg(self, arg, val): + def _parse_filter_arg(self, arg): + """ + Parses a filter arg in the format: + __ + :returns: colname, op tuple + """ statement = arg.split('__') if len(statement) == 1: - return WhereFilter(arg, None, val) + return arg, None elif len(statement) == 2: - return WhereFilter(statement[0], statement[1], val) + return statement[0], statement[1] else: raise QueryException("Can't parse '{}'".format(arg)) @@ -146,17 +227,18 @@ def filter(self, **kwargs): #add arguments to the where clause filters clone = copy.deepcopy(self) for arg, val in kwargs.items(): - raw_statement = self._parse_filter_arg(arg, val) + col_name, col_op = self._parse_filter_arg(arg) #resolve column and operator try: - column = self.model._columns[raw_statement.column] + column = self.model._columns[col_name] except KeyError: - raise QueryException("Can't resolve column name: '{}'".format(raw_statement.column)) + raise QueryException("Can't resolve column name: '{}'".format(col_name)) - operator = QueryOperator.get_operator(raw_statement.operator) + #get query operator, or use equals if not supplied + operator_class = QueryOperator.get_operator(col_op or 'EQ') + operator = operator_class(column, val) - statement = 
WhereFilter(column, operator, val) - clone._where.append(statement) + clone._where.append(operator) return clone diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index ef1bd9c3f2..7f6de601b8 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -11,6 +11,11 @@ def test_query_filter_parsing(self): Tests the queryset filter method """ + def test_using_invalid_column_names_in_filter_kwargs_raises_error(self): + """ + Tests that using invalid or nonexistant column names for filter args raises an error + """ + def test_where_clause_generation(self): """ Tests the where clause creation From 1fcf80909510872af1e60044d625ca6129683ea6 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 22 Nov 2012 12:39:22 -0800 Subject: [PATCH 0025/3726] adding select querystring generation moving the manager functionality into the queryset and modifying the model and query classes to work with it --- cqlengine/columns.py | 6 +- cqlengine/manager.py | 23 +---- cqlengine/models.py | 29 +++++- cqlengine/query.py | 93 ++++++++++++------- .../tests/model/test_class_construction.py | 7 +- cqlengine/tests/model/test_model_io.py | 4 + cqlengine/tests/query/test_queryoperators.py | 0 cqlengine/tests/query/test_queryset.py | 52 ++++++++++- requirements.txt | 2 + 9 files changed, 153 insertions(+), 63 deletions(-) create mode 100644 cqlengine/tests/query/test_queryoperators.py diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 6ff219cfe9..e81e893005 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -42,9 +42,11 @@ class BaseColumn(object): instance_counter = 0 - def __init__(self, primary_key=False, db_field=None, default=None, null=False): + def __init__(self, primary_key=False, index=False, db_field=None, default=None, null=False): """ - :param primary_key: bool flag, there can be only one primary key per doc + :param primary_key: bool flag, indicates this column is a primary 
key. The first primary key defined + on a model is the partition key, all others are cluster keys + :param index: bool flag, indicates an index should be created for this column :param db_field: the fieldname this field will map to in the database :param default: the default value, can be a value or a callable (no args) :param null: boolean, is the field nullable? diff --git a/cqlengine/manager.py b/cqlengine/manager.py index 4d4cbb73bc..b2c2ee0b52 100644 --- a/cqlengine/manager.py +++ b/cqlengine/manager.py @@ -2,6 +2,7 @@ from cqlengine.query import QuerySet +#TODO: refactor this into the QuerySet class Manager(object): def __init__(self, model): @@ -27,22 +28,9 @@ def __call__(self, **kwargs): return self.filter(**kwargs) def find(self, pk): - """ - Returns the row corresponding to the primary key value given - """ - values = QuerySet(self.model).find(pk) - if values is None: return - - #change the column names to model names - #in case they are different - field_dict = {} - db_map = self.model._db_map - for key, val in values.items(): - if key in db_map: - field_dict[db_map[key]] = val - else: - field_dict[key] = val - return self.model(**field_dict) + """ Returns the row corresponding to the primary key set given """ + #TODO: rework this to work with multiple primary keys + return QuerySet(self.model).find(pk) def all(self): return QuerySet(self.model).all() @@ -50,9 +38,6 @@ def all(self): def filter(self, **kwargs): return QuerySet(self.model).filter(**kwargs) - def exclude(self, **kwargs): - return QuerySet(self.model).exclude(**kwargs) - def create(self, **kwargs): return self.model(**kwargs).save() diff --git a/cqlengine/models.py b/cqlengine/models.py index 3fa7c6bfcd..ca51a21e8c 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -2,7 +2,7 @@ from cqlengine import columns from cqlengine.exceptions import ModelException -from cqlengine.manager import Manager +from cqlengine.query import QuerySet class BaseModel(object): """ @@ -19,11 +19,28 @@ 
def __init__(self, **values): value_mngr = column.value_manager(self, column, values.get(name, None)) self._values[name] = value_mngr + #TODO: note any deferred or only fields so they're not deleted + @classmethod def find(cls, pk): """ Loads a document by it's primary key """ + #TODO: rework this to work with multiple primary keys cls.objects.find(pk) + @classmethod + def column_family_name(cls): + """ + Returns the column family name if it's been defined + otherwise, it creates it from the module and class name + """ + if cls.db_name: + return cls.db_name + cf_name = cls.__module__ + '.' + cls.__name__ + cf_name = cf_name.replace('.', '_') + #trim to less than 48 characters or cassandra will complain + cf_name = cf_name[-48:] + return cf_name + @property def pk(self): """ Returns the object's primary key """ @@ -45,12 +62,14 @@ def as_dict(self): def save(self): is_new = self.pk is None self.validate() - self.objects._save_instance(self) + #self.objects._save_instance(self) + self.objects.save(self) return self def delete(self): """ Deletes this instance """ - self.objects._delete_instance(self) + #self.objects._delete_instance(self) + self.objects.delete_instance(self) class ModelMetaClass(type): @@ -115,9 +134,9 @@ def _transform_column(col_name, col_obj): attrs['_pk_name'] = pk_name attrs['_dynamic_columns'] = {} - #create the class and add a manager to it + #create the class and add a QuerySet to it klass = super(ModelMetaClass, cls).__new__(cls, name, bases, attrs) - klass.objects = Manager(klass) + klass.objects = QuerySet(klass) return klass diff --git a/cqlengine/query.py b/cqlengine/query.py index 56a27e6192..7b6c91b0cf 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -80,7 +80,7 @@ def _recurse(klass): pass _recurse(QueryOperator) try: - return QueryOperator.opmap[symbol.upper()](column) + return QueryOperator.opmap[symbol.upper()] except KeyError: raise QueryOperatorException("{} doesn't map to a QueryOperator".format(symbol)) @@ -119,10 
+119,10 @@ class QuerySet(object): #REVERSE, LIMIT #ORDER BY - def __init__(self, model, query_args={}): + def __init__(self, model): super(QuerySet, self).__init__() self.model = model - self.column_family_name = self.model.objects.column_family_name + self.column_family_name = self.model.column_family_name() #Where clause filters self._where = [] @@ -141,65 +141,82 @@ def __init__(self, model, query_args={}): self._cursor = None #----query generation / execution---- - def _execute_query(self): - conn = get_connection() - self._cursor = conn.cursor() def _validate_where_syntax(self): - """ - Checks that a filterset will not create invalid cql - """ + """ Checks that a filterset will not create invalid cql """ #TODO: check that there's either a = or IN relationship with a primary key or indexed field + #TODO: abuse this to see if we can get cql to raise an exception def _where_clause(self): - """ - Returns a where clause based on the given filter args - """ + """ Returns a where clause based on the given filter args """ self._validate_where_syntax() return ' AND '.join([f.cql for f in self._where]) def _where_values(self): - """ - Returns the value dict to be passed to the cql query - """ + """ Returns the value dict to be passed to the cql query """ values = {} - for where in self._where: values.update(where.get_dict()) + for where in self._where: + values.update(where.get_dict()) return values - def _select_query(self): + def _select_query(self, count=False): """ Returns a select clause based on the given filter args + + :param count: indicates this should return a count query only """ - pass + qs = [] + if count: + qs += ['SELECT COUNT(*)'] + else: + fields = self.models._columns.keys() + if self._defer_fields: + fields = [f for f in fields if f not in self._defer_fields] + elif self._only_fields: + fields = [f for f in fields if f in self._only_fields] + db_fields = [self.model._columns[f].db_fields for f in fields] + qs += ['SELECT {}'.format(', 
'.join(db_fields))] - @property - def cursor(self): - if self._cursor is None: - self._cursor = self._execute_query() - return self._cursor + qs += ['FROM {}'.format(self.column_family_name)] + + if self._where: + qs += ['WHERE {}'.format(self._where_clause())] + + #TODO: add support for limit, start, order by, and reverse + return ' '.join(qs) #----Reads------ def __iter__(self): if self._cursor is None: - self._execute_query() + conn = get_connection() + self._cursor = conn.cursor() + self._cursor.execute(self._select_query(), self._where_values()) return self + def _construct_instance(self, values): + #translate column names to model names + field_dict = {} + db_map = self.model._db_map + for key, val in values.items(): + if key in db_map: + field_dict[db_map[key]] = val + else: + field_dict[key] = val + return self.model(**field_dict) + def _get_next(self): - """ - Gets the next cursor result - Returns a db_field->value dict - """ + """ Gets the next cursor result """ cur = self._cursor values = cur.fetchone() if values is None: return names = [i[0] for i in cur.description] value_dict = dict(zip(names, values)) - return value_dict + return self._construct_instance(value_dict) def next(self): - values = self._get_next() - if values is None: raise StopIteration - return values + instance = self._get_next() + if instance is None: raise StopIteration + return instance def first(self): pass @@ -242,15 +259,22 @@ def filter(self, **kwargs): return clone + def __call__(self, **kwargs): + return self.filter(**kwargs) + def count(self): """ Returns the number of rows matched by this query """ - qs = 'SELECT COUNT(*) FROM {}'.format(self.column_family_name) + con = get_connection() + cur = con.cursor() + cur.execute(self._select_query(count=True), self._where_values()) + return cur.fetchone() def find(self, pk): """ loads one document identified by it's primary key """ #TODO: make this a convenience wrapper of the filter method + #TODO: rework this to work with multiple 
primary keys qs = 'SELECT * FROM {column_family} WHERE {pk_name}=:{pk_name}' qs = qs.format(column_family=self.column_family_name, pk_name=self.model._pk_name) @@ -326,6 +350,9 @@ def save(self, instance): cur = conn.cursor() cur.execute(qs, field_values) + def create(self, **kwargs): + return self.model(**kwargs).save() + #----delete--- def delete(self, columns=[]): """ diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index 949e40fa77..9b8f4f359f 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -18,7 +18,7 @@ def test_column_attributes_handled_correctly(self): class TestModel(Model): text = columns.Text() - #check class attributes + #check class attibutes self.assertHasAttr(TestModel, '_columns') self.assertHasAttr(TestModel, 'id') self.assertHasAttr(TestModel, 'text') @@ -79,6 +79,11 @@ class Stuff(Model): self.assertEquals(inst1.num, 5) self.assertEquals(inst2.num, 7) + def test_normal_fields_can_be_defined_between_primary_keys(self): + """ + Tests tha non primary key fields can be defined between primary key fields + """ + def test_meta_data_is_not_inherited(self): """ Test that metadata defined in one class, is not inherited by subclasses diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index a080726ba6..5d0824203d 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -52,3 +52,7 @@ def test_nullable_columns_are_saved_properly(self): Tests that nullable columns save without any trouble """ + def test_column_deleting_works_properly(self): + """ + """ + diff --git a/cqlengine/tests/query/test_queryoperators.py b/cqlengine/tests/query/test_queryoperators.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 7f6de601b8..fe58dc725e 100644 --- 
a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -3,25 +3,58 @@ from cqlengine.exceptions import ModelException from cqlengine.models import Model from cqlengine import columns +from cqlengine import query + +class TestModel(Model): + test_id = columns.Integer(primary_key=True) + attempt_id = columns.Integer(primary_key=True) + descriptions = columns.Text() + expected_result = columns.Integer() + test_result = columns.Integer(index=True) class TestQuerySet(BaseCassEngTestCase): def test_query_filter_parsing(self): """ - Tests the queryset filter method + Tests the queryset filter method parses it's kwargs properly """ + query1 = TestModel.objects(test_id=5) + assert len(query1._where) == 1 + + op = query1._where[0] + assert isinstance(op, query.EqualsOperator) + assert op.value == 5 + + query2 = query1.filter(expected_result__gte=1) + assert len(query2._where) == 2 + + op = query2._where[1] + assert isinstance(op, query.GreaterThanOrEqualOperator) + assert op.value == 1 def test_using_invalid_column_names_in_filter_kwargs_raises_error(self): """ Tests that using invalid or nonexistant column names for filter args raises an error """ + with self.assertRaises(query.QueryException): + query0 = TestModel.objects(nonsense=5) def test_where_clause_generation(self): """ Tests the where clause creation """ + query1 = TestModel.objects(test_id=5) + ids = [o.identifier for o in query1._where] + where = query1._where_clause() + assert where == 'test_id = :{}'.format(*ids) + + query2 = query1.filter(expected_result__gte=1) + ids = [o.identifier for o in query2._where] + where = query2._where_clause() + assert where == 'test_id = :{} AND expected_result >= :{}'.format(*ids) - def test_querystring_generation(self): + + def test_querystring_construction(self): """ Tests the select querystring creation """ @@ -30,6 +63,11 @@ def test_queryset_is_immutable(self): """ Tests that calling a queryset function that changes it's state returns a new 
queryset """ + query1 = TestModel.objects(test_id=5) + assert len(query1._where) == 1 + + query2 = query1.filter(expected_result__gte=1) + assert len(query2._where) == 2 def test_queryset_slicing(self): """ @@ -45,13 +83,21 @@ def test_the_all_method_clears_where_filter(self): """ Tests that calling all on a queryset with previously defined filters returns a queryset with no filters """ + query1 = TestModel.objects(test_id=5) + assert len(query1._where) == 1 + + query2 = query1.filter(expected_result__gte=1) + assert len(query2._where) == 2 + + query3 = query2.all() + assert len(query3._where) == 0 def test_defining_only_and_defer_fails(self): """ Tests that trying to add fields to either only or defer, or doing so more than once fails """ - def test_defining_only_or_defer_fields_fails(self): + def test_defining_only_or_defer_on_nonexistant_fields_fails(self): """ Tests that setting only or defer fields that don't exist raises an exception """ diff --git a/requirements.txt b/requirements.txt index b6734c8844..8411d39556 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1,3 @@ cql==1.2.0 +ipython==0.13.1 +ipdb==0.7 From b7d91fe869fef8fcdebafc83b771db907df73f16 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 22 Nov 2012 23:16:49 -0800 Subject: [PATCH 0026/3726] writing query delete method added queryset usage tests refactoring how column names are stored and calculated --- cqlengine/columns.py | 13 ++++-- cqlengine/models.py | 13 ++---- cqlengine/query.py | 54 ++++++++++++++-------- cqlengine/tests/query/test_queryset.py | 63 ++++++++++++++++++++++++-- 4 files changed, 111 insertions(+), 32 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index e81e893005..0eeb8051d0 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -56,6 +56,9 @@ def __init__(self, primary_key=False, index=False, db_field=None, default=None, self.default = default self.null = null + #the column name in the model definition + self.column_name = 
None + self.value = None #keep track of instantiation order @@ -112,17 +115,21 @@ def get_column_def(self): """ Returns a column definition for CQL table definition """ - dterms = [self.db_field, self.db_type] + dterms = [self.db_field_name, self.db_type] #if self.primary_key: #dterms.append('PRIMARY KEY') return ' '.join(dterms) - def set_db_name(self, name): + def set_column_name(self, name): """ Sets the column name during document class construction This value will be ignored if db_field is set in __init__ """ - self.db_field = self.db_field or name + self.column_name = name + + @property + def db_field_name(self): + return self.db_field or self.column_name class Bytes(BaseColumn): db_type = 'blob' diff --git a/cqlengine/models.py b/cqlengine/models.py index ca51a21e8c..2d9de4ed5a 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -62,13 +62,11 @@ def as_dict(self): def save(self): is_new = self.pk is None self.validate() - #self.objects._save_instance(self) self.objects.save(self) return self def delete(self): """ Deletes this instance """ - #self.objects._delete_instance(self) self.objects.delete_instance(self) @@ -84,7 +82,7 @@ def __new__(cls, name, bases, attrs): def _transform_column(col_name, col_obj): _columns[col_name] = col_obj - col_obj.set_db_name(col_name) + col_obj.set_column_name(col_name) #set properties _get = lambda self: self._values[col_name].getval() _set = lambda self, val: self._values[col_name].setval(val) @@ -94,7 +92,6 @@ def _transform_column(col_name, col_obj): else: attrs[col_name] = property(_get, _set, _del) - column_definitions = [(k,v) for k,v in attrs.items() if isinstance(v, columns.BaseColumn)] column_definitions = sorted(column_definitions, lambda x,y: cmp(x[1].position, y[1].position)) @@ -116,9 +113,9 @@ def _transform_column(col_name, col_obj): #check for duplicate column names col_names = set() for k,v in _columns.items(): - if v.db_field in col_names: - raise ModelException("{} defines the column {} more than 
once".format(name, v.db_field)) - col_names.add(v.db_field) + if v.db_field_name in col_names: + raise ModelException("{} defines the column {} more than once".format(name, v.db_field_name)) + col_names.add(v.db_field_name) #get column family name cf_name = attrs.pop('db_name', name) @@ -126,7 +123,7 @@ def _transform_column(col_name, col_obj): #create db_name -> model name map for loading db_map = {} for name, col in _columns.items(): - db_map[col.db_field] = name + db_map[col.db_field_name] = name #add management members to the class attrs['_columns'] = _columns diff --git a/cqlengine/query.py b/cqlengine/query.py index 7b6c91b0cf..b2c8c9e89a 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -39,7 +39,7 @@ def cql(self): Returns this operator's portion of the WHERE clause :param valname: the dict key that this operator's compare value will be found in """ - return '{} {} :{}'.format(self.column.db_field, self.cql_symbol, self.identifier) + return '{} {} :{}'.format(self.column.db_field_name, self.cql_symbol, self.identifier) def validate_operator(self): """ @@ -140,6 +140,12 @@ def __init__(self, model): self._cursor = None + def __unicode__(self): + return self._select_query() + + def __str__(self): + return str(self.__unicode__()) + #----query generation / execution---- def _validate_where_syntax(self): @@ -169,12 +175,12 @@ def _select_query(self, count=False): if count: qs += ['SELECT COUNT(*)'] else: - fields = self.models._columns.keys() + fields = self.model._columns.keys() if self._defer_fields: fields = [f for f in fields if f not in self._defer_fields] elif self._only_fields: fields = [f for f in fields if f in self._only_fields] - db_fields = [self.model._columns[f].db_fields for f in fields] + db_fields = [self.model._columns[f].db_field_name for f in fields] qs += ['SELECT {}'.format(', '.join(db_fields))] qs += ['FROM {}'.format(self.column_family_name)] @@ -182,7 +188,10 @@ def _select_query(self, count=False): if self._where: qs += 
['WHERE {}'.format(self._where_clause())] - #TODO: add support for limit, start, order by, and reverse + if not count: + #TODO: add support for limit, start, order by, and reverse + pass + return ' '.join(qs) #----Reads------ @@ -191,6 +200,7 @@ def __iter__(self): conn = get_connection() self._cursor = conn.cursor() self._cursor.execute(self._select_query(), self._where_values()) + self._rowcount = self._cursor.rowcount return self def _construct_instance(self, values): @@ -267,7 +277,10 @@ def count(self): con = get_connection() cur = con.cursor() cur.execute(self._select_query(count=True), self._where_values()) - return cur.fetchone() + return cur.fetchone()[0] + + def __len__(self): + return self.count() def find(self, pk): """ @@ -319,23 +332,14 @@ def save(self, instance): prior to calling this. """ assert type(instance) == self.model + #organize data value_pairs = [] - - #get pk - col = self.model._columns[self.model._pk_name] values = instance.as_dict() - value_pairs += [(col.db_field, values.get(self.model._pk_name))] #get defined fields and their column names for name, col in self.model._columns.items(): - if col.is_primary_key: continue - value_pairs += [(col.db_field, values.get(name))] - - #add dynamic fields - for key, val in values.items(): - if key in self.model._columns: continue - value_pairs += [(key, val)] + value_pairs += [(col.db_field_name, values.get(name))] #construct query string field_names = zip(*value_pairs)[0] @@ -357,7 +361,21 @@ def create(self, **kwargs): def delete(self, columns=[]): """ Deletes the contents of a query + + :returns: number of rows deleted """ + qs = ['DELETE FROM {}'.format(self.column_family_name)] + if self._where: + qs += ['WHERE {}'.format(self._where_clause())] + qs = ' '.join(qs) + + #TODO: Return number of rows deleted + con = get_connection() + cur = con.cursor() + cur.execute(qs, self._where_values()) + return cur.fetchone() + + def delete_instance(self, instance): """ Deletes one instance """ @@ -378,8 
+396,8 @@ def _create_column_family(self): pkeys = [] qtypes = [] def add_column(col): - s = '{} {}'.format(col.db_field, col.db_type) - if col.primary_key: pkeys.append(col.db_field) + s = '{} {}'.format(col.db_field_name, col.db_type) + if col.primary_key: pkeys.append(col.db_field_name) qtypes.append(s) #add_column(self.model._columns[self.model._pk_name]) for name, col in self.model._columns.items(): diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index fe58dc725e..9ac767b51f 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -8,11 +8,11 @@ class TestModel(Model): test_id = columns.Integer(primary_key=True) attempt_id = columns.Integer(primary_key=True) - descriptions = columns.Text() + description = columns.Text() expected_result = columns.Integer() test_result = columns.Integer(index=True) -class TestQuerySet(BaseCassEngTestCase): +class TestQuerySetOperation(BaseCassEngTestCase): def test_query_filter_parsing(self): """ @@ -54,7 +54,7 @@ def test_where_clause_generation(self): assert where == 'test_id = :{} AND expected_result >= :{}'.format(*ids) - def test_querystring_construction(self): + def test_querystring_generation(self): """ Tests the select querystring creation """ @@ -101,3 +101,60 @@ def test_defining_only_or_defer_on_nonexistant_fields_fails(self): """ Tests that setting only or defer fields that don't exist raises an exception """ + +class TestQuerySetUsage(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestQuerySetUsage, cls).setUpClass() + try: TestModel.objects._delete_column_family() + except: pass + TestModel.objects._create_column_family() + + TestModel.objects.create(test_id=0, attempt_id=0, description='try1', expected_result=5, test_result=30) + TestModel.objects.create(test_id=0, attempt_id=1, description='try2', expected_result=10, test_result=30) + TestModel.objects.create(test_id=0, attempt_id=2, description='try3', 
expected_result=15, test_result=30) + TestModel.objects.create(test_id=0, attempt_id=3, description='try4', expected_result=20, test_result=25) + + TestModel.objects.create(test_id=1, attempt_id=0, description='try5', expected_result=5, test_result=25) + TestModel.objects.create(test_id=1, attempt_id=1, description='try6', expected_result=10, test_result=25) + TestModel.objects.create(test_id=1, attempt_id=2, description='try7', expected_result=15, test_result=25) + TestModel.objects.create(test_id=1, attempt_id=3, description='try8', expected_result=20, test_result=20) + + TestModel.objects.create(test_id=2, attempt_id=0, description='try9', expected_result=50, test_result=40) + TestModel.objects.create(test_id=2, attempt_id=1, description='try10', expected_result=60, test_result=40) + TestModel.objects.create(test_id=2, attempt_id=2, description='try11', expected_result=70, test_result=45) + TestModel.objects.create(test_id=2, attempt_id=3, description='try12', expected_result=75, test_result=45) + + @classmethod + def tearDownClass(cls): + super(TestQuerySetUsage, cls).tearDownClass() + TestModel.objects._delete_column_family() + + def test_count(self): + q = TestModel.objects(test_id=0) + assert q.count() == 4 + + def test_iteration(self): + q = TestModel.objects(test_id=0) + #tuple of expected attempt_id, expected_result values + import ipdb; ipdb.set_trace() + compare_set = set([(0,5), (1,10), (2,15), (3,20)]) + for t in q: + val = t.attempt_id, t.expected_result + assert val in compare_set + compare_set.remove(val) + assert len(compare_set) == 0 + + q = TestModel.objects(attempt_id=3) + assert len(q) == 3 + #tuple of expected test_id, expected_result values + compare_set = set([(0,20), (1,20), (2,75)]) + for t in q: + val = t.test_id, t.expected_result + assert val in compare_set + compare_set.remove(val) + assert len(compare_set) == 0 + + + From 277f1496e08117d0fd0721305bbb125e11227891 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 23 Nov 2012 
13:37:45 -0800 Subject: [PATCH 0027/3726] made keyspace a model attribute made keyspace a required parameter for get_connection moved create/delete column family and keyspaces into a management module --- cqlengine/connection.py | 46 +++++++++----- cqlengine/management.py | 50 +++++++++++++++ cqlengine/models.py | 5 +- cqlengine/query.py | 62 ++++++------------- .../tests/model/test_class_construction.py | 5 ++ cqlengine/tests/model/test_model_io.py | 15 ++++- cqlengine/tests/query/test_queryset.py | 10 +-- 7 files changed, 125 insertions(+), 68 deletions(-) create mode 100644 cqlengine/management.py diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 3cebc3acbf..19a767ac45 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -3,25 +3,39 @@ #http://cassandra.apache.org/doc/cql/CQL.html import cql +from cqlengine.exceptions import CQLEngineException + +class CQLConnectionError(CQLEngineException): pass _keyspace = 'cassengine_test' -_conn = None -def get_connection(): - global _conn - if _conn is None: - _conn = cql.connect('127.0.0.1', 9160) - _conn.set_cql_version('3.0.0') - try: - _conn.set_initial_keyspace(_keyspace) - except cql.ProgrammingError, e: - #http://www.datastax.com/docs/1.0/references/cql/CREATE_KEYSPACE - cur = _conn.cursor() - cur.execute("""create keyspace {} - with strategy_class = 'SimpleStrategy' - and strategy_options:replication_factor=1;""".format(_keyspace)) - _conn.set_initial_keyspace(_keyspace) - return _conn +#TODO: look into the cql connection pool class +_conn = {} +def get_connection(keyspace, create_missing_keyspace=True): + con = _conn.get(keyspace) + if con is None: + con = cql.connect('127.0.0.1', 9160) + con.set_cql_version('3.0.0') + + if keyspace: + try: + con.set_initial_keyspace(keyspace) + except cql.ProgrammingError, e: + if create_missing_keyspace: + from cqlengine.management import create_keyspace + create_keyspace(keyspace) + 
#http://www.datastax.com/docs/1.0/references/cql/CREATE_KEYSPACE + cur = con.cursor() + cur.execute("""create keyspace {} + with strategy_class = 'SimpleStrategy' + and strategy_options:replication_factor=1;""".format(keyspace)) + con.set_initial_keyspace(keyspace) + else: + raise CQLConnectionError('"{}" is not an existing keyspace'.format(keyspace)) + + _conn[keyspace] = con + + return con #cli examples here: #http://wiki.apache.org/cassandra/CassandraCli diff --git a/cqlengine/management.py b/cqlengine/management.py new file mode 100644 index 0000000000..07c24acd6e --- /dev/null +++ b/cqlengine/management.py @@ -0,0 +1,50 @@ +from cqlengine.connection import get_connection + + +def create_keyspace(name): + con = get_connection(None) + cur = con.cursor() + cur.execute("""create keyspace {} + with strategy_class = 'SimpleStrategy' + and strategy_options:replication_factor=1;""".format(name)) + +def delete_keyspace(name): + pass + +def create_column_family(model): + #TODO: check for existing column family + #construct query string + qs = ['CREATE TABLE {}'.format(model.column_family_name())] + + #add column types + pkeys = [] + qtypes = [] + def add_column(col): + s = '{} {}'.format(col.db_field_name, col.db_type) + if col.primary_key: pkeys.append(col.db_field_name) + qtypes.append(s) + for name, col in model._columns.items(): + add_column(col) + + qtypes.append('PRIMARY KEY ({})'.format(', '.join(pkeys))) + + qs += ['({})'.format(', '.join(qtypes))] + qs = ' '.join(qs) + + #add primary key + conn = get_connection(model.keyspace) + cur = conn.cursor() + try: + cur.execute(qs) + except BaseException, e: + if 'Cannot add already existing column family' not in e.message: + raise + + +def delete_column_family(model): + #TODO: check that model exists + conn = get_connection(model.keyspace) + cur = conn.cursor() + cur.execute('drop table {};'.format(model.column_family_name())) + + pass diff --git a/cqlengine/models.py b/cqlengine/models.py index 2d9de4ed5a..7b2fa8ead1 
100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -9,10 +9,13 @@ class BaseModel(object): The base model class, don't inherit from this, inherit from Model, defined below """ - #table names will be generated automatically from it's model name and package + #table names will be generated automatically from it's model and package name #however, you can alse define them manually here db_name = None + #the keyspace for this model + keyspace = 'cqlengine' + def __init__(self, **values): self._values = {} for name, column in self._columns.items(): diff --git a/cqlengine/query.py b/cqlengine/query.py index b2c8c9e89a..71853fe169 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -46,15 +46,23 @@ def validate_operator(self): Checks that this operator can be used on the column provided """ if self.symbol is None: - raise QueryOperatorException("{} is not a valid operator, use one with 'symbol' defined".format(self.__class__.__name__)) + raise QueryOperatorException( + "{} is not a valid operator, use one with 'symbol' defined".format( + self.__class__.__name__ + ) + ) if self.cql_symbol is None: - raise QueryOperatorException("{} is not a valid operator, use one with 'cql_symbol' defined".format(self.__class__.__name__)) + raise QueryOperatorException( + "{} is not a valid operator, use one with 'cql_symbol' defined".format( + self.__class__.__name__ + ) + ) def validate_value(self): """ Checks that the compare value works with this operator - *doesn't do anything by default + Doesn't do anything by default """ pass @@ -109,7 +117,6 @@ class LessThanOrEqualOperator(QueryOperator): cql_symbol = '<=' class QuerySet(object): - #TODO: querysets should be immutable #TODO: querysets should be executed lazily #TODO: support specifying offset and limit (use slice) (maybe return a mutated queryset) #TODO: support specifying columns to exclude or select only @@ -195,9 +202,10 @@ def _select_query(self, count=False): return ' '.join(qs) #----Reads------ + def 
__iter__(self): if self._cursor is None: - conn = get_connection() + conn = get_connection(self.model.keyspace) self._cursor = conn.cursor() self._cursor.execute(self._select_query(), self._where_values()) self._rowcount = self._cursor.rowcount @@ -274,7 +282,7 @@ def __call__(self, **kwargs): def count(self): """ Returns the number of rows matched by this query """ - con = get_connection() + con = get_connection(self.model.keyspace) cur = con.cursor() cur.execute(self._select_query(count=True), self._where_values()) return cur.fetchone()[0] @@ -291,7 +299,7 @@ def find(self, pk): qs = 'SELECT * FROM {column_family} WHERE {pk_name}=:{pk_name}' qs = qs.format(column_family=self.column_family_name, pk_name=self.model._pk_name) - conn = get_connection() + conn = get_connection(self.model.keyspace) self._cursor = conn.cursor() self._cursor.execute(qs, {self.model._pk_name:pk}) return self._get_next() @@ -350,7 +358,7 @@ def save(self, instance): qs += ["({})".format(', '.join([':'+f for f in field_names]))] qs = ' '.join(qs) - conn = get_connection() + conn = get_connection(self.model.keyspace) cur = conn.cursor() cur.execute(qs, field_values) @@ -370,7 +378,7 @@ def delete(self, columns=[]): qs = ' '.join(qs) #TODO: Return number of rows deleted - con = get_connection() + con = get_connection(self.model.keyspace) cur = con.cursor() cur.execute(qs, self._where_values()) return cur.fetchone() @@ -384,42 +392,8 @@ def delete_instance(self, instance): qs += ['WHERE {0}=:{0}'.format(pk_name)] qs = ' '.join(qs) - conn = get_connection() + conn = get_connection(self.model.keyspace) cur = conn.cursor() cur.execute(qs, {pk_name:instance.pk}) - def _create_column_family(self): - #construct query string - qs = ['CREATE TABLE {}'.format(self.column_family_name)] - - #add column types - pkeys = [] - qtypes = [] - def add_column(col): - s = '{} {}'.format(col.db_field_name, col.db_type) - if col.primary_key: pkeys.append(col.db_field_name) - qtypes.append(s) - 
#add_column(self.model._columns[self.model._pk_name]) - for name, col in self.model._columns.items(): - add_column(col) - - qtypes.append('PRIMARY KEY ({})'.format(', '.join(pkeys))) - - qs += ['({})'.format(', '.join(qtypes))] - qs = ' '.join(qs) - - #add primary key - conn = get_connection() - cur = conn.cursor() - try: - cur.execute(qs) - except BaseException, e: - if 'Cannot add already existing column family' not in e.message: - raise - - def _delete_column_family(self): - conn = get_connection() - cur = conn.cursor() - cur.execute('drop table {};'.format(self.column_family_name)) - diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index 9b8f4f359f..45b83d3d5c 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -84,6 +84,11 @@ def test_normal_fields_can_be_defined_between_primary_keys(self): Tests tha non primary key fields can be defined between primary key fields """ + def test_model_keyspace_attribute_must_be_a_string(self): + """ + Tests that users can't set the keyspace to None, or something else + """ + def test_meta_data_is_not_inherited(self): """ Test that metadata defined in one class, is not inherited by subclasses diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index 5d0824203d..fe558eb936 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -1,6 +1,9 @@ from unittest import skip from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.management import create_column_family +from cqlengine.management import delete_column_family +from cqlengine.models import Model from cqlengine.models import Model from cqlengine import columns @@ -12,9 +15,15 @@ class TestModel(Model): class TestModelIO(BaseCassEngTestCase): - def setUp(self): - super(TestModelIO, self).setUp() - TestModel.objects._create_column_family() + 
@classmethod + def setUpClass(cls): + super(TestModelIO, cls).setUpClass() + create_column_family(TestModel) + + @classmethod + def tearDownClass(cls): + super(TestModelIO, cls).tearDownClass() + delete_column_family(TestModel) def test_model_save_and_load(self): """ diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 9ac767b51f..b59f9ab7ee 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -1,6 +1,8 @@ from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.exceptions import ModelException +from cqlengine.management import create_column_family +from cqlengine.management import delete_column_family from cqlengine.models import Model from cqlengine import columns from cqlengine import query @@ -107,9 +109,9 @@ class TestQuerySetUsage(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(TestQuerySetUsage, cls).setUpClass() - try: TestModel.objects._delete_column_family() + try: delete_column_family(TestModel) except: pass - TestModel.objects._create_column_family() + create_column_family(TestModel) TestModel.objects.create(test_id=0, attempt_id=0, description='try1', expected_result=5, test_result=30) TestModel.objects.create(test_id=0, attempt_id=1, description='try2', expected_result=10, test_result=30) @@ -129,7 +131,8 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): super(TestQuerySetUsage, cls).tearDownClass() - TestModel.objects._delete_column_family() + #TestModel.objects._delete_column_family() + delete_column_family(TestModel) def test_count(self): q = TestModel.objects(test_id=0) @@ -138,7 +141,6 @@ def test_count(self): def test_iteration(self): q = TestModel.objects(test_id=0) #tuple of expected attempt_id, expected_result values - import ipdb; ipdb.set_trace() compare_set = set([(0,5), (1,10), (2,15), (3,20)]) for t in q: val = t.attempt_id, t.expected_result From d67006919a723d831cd5c6d68b6bf5128a2cdb7e Mon Sep 17 00:00:00 
2001 From: Blake Eggleston Date: Fri, 23 Nov 2012 14:19:40 -0800 Subject: [PATCH 0028/3726] refactoring exceptions --- cqlengine/exceptions.py | 1 - cqlengine/query.py | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/exceptions.py b/cqlengine/exceptions.py index 3a5444e9b1..0b1ff55f1e 100644 --- a/cqlengine/exceptions.py +++ b/cqlengine/exceptions.py @@ -3,4 +3,3 @@ class CQLEngineException(BaseException): pass class ModelException(CQLEngineException): pass class ValidationError(CQLEngineException): pass -class QueryException(CQLEngineException): pass diff --git a/cqlengine/query.py b/cqlengine/query.py index 71853fe169..86837e93f9 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -4,11 +4,12 @@ from time import time from cqlengine.connection import get_connection -from cqlengine.exceptions import QueryException +from cqlengine.exceptions import CQLEngineException #CQL 3 reference: #http://www.datastax.com/docs/1.1/references/cql/index +class QueryException(CQLEngineException): pass class QueryOperatorException(QueryException): pass class QueryOperator(object): From fdae4779048e00f80173d0f94a3bd7bdb178710e Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 23 Nov 2012 16:17:26 -0800 Subject: [PATCH 0029/3726] adding column indexing support adding some where clause validation added supporting tests --- cqlengine/columns.py | 7 ++ cqlengine/management.py | 75 +++++++++------- cqlengine/models.py | 17 +++- cqlengine/query.py | 8 +- .../tests/model/test_class_construction.py | 5 ++ cqlengine/tests/query/test_queryset.py | 87 +++++++++++++++++-- 6 files changed, 157 insertions(+), 42 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 0eeb8051d0..9ce1bb6bf1 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -52,6 +52,7 @@ def __init__(self, primary_key=False, index=False, db_field=None, default=None, :param null: boolean, is the field nullable? 
""" self.primary_key = primary_key + self.index = index self.db_field = db_field self.default = default self.null = null @@ -129,8 +130,14 @@ def set_column_name(self, name): @property def db_field_name(self): + """ Returns the name of the cql name of this column """ return self.db_field or self.column_name + @property + def db_index_name(self): + """ Returns the name of the cql index """ + return 'index_{}'.format(self.db_field_name) + class Bytes(BaseColumn): db_type = 'blob' diff --git a/cqlengine/management.py b/cqlengine/management.py index 07c24acd6e..29fe02ba66 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -1,50 +1,65 @@ from cqlengine.connection import get_connection - def create_keyspace(name): con = get_connection(None) cur = con.cursor() - cur.execute("""create keyspace {} - with strategy_class = 'SimpleStrategy' - and strategy_options:replication_factor=1;""".format(name)) + cur.execute("""CREATE KEYSPACE {} + WITH strategy_class = 'SimpleStrategy' + AND strategy_options:replication_factor=1;""".format(name)) def delete_keyspace(name): - pass + con = get_connection(None) + cur = con.cursor() + cur.execute("DROP KEYSPACE {}".format(name)) def create_column_family(model): - #TODO: check for existing column family #construct query string - qs = ['CREATE TABLE {}'.format(model.column_family_name())] + cf_name = model.column_family_name() - #add column types - pkeys = [] - qtypes = [] - def add_column(col): - s = '{} {}'.format(col.db_field_name, col.db_type) - if col.primary_key: pkeys.append(col.db_field_name) - qtypes.append(s) - for name, col in model._columns.items(): - add_column(col) + conn = get_connection(model.keyspace) + cur = conn.cursor() - qtypes.append('PRIMARY KEY ({})'.format(', '.join(pkeys))) + #check for an existing column family + ks_info = conn.client.describe_keyspace(model.keyspace) + if not any([cf_name == cf.name for cf in ks_info.cf_defs]): + qs = ['CREATE TABLE {}'.format(cf_name)] - qs += ['({})'.format(', 
'.join(qtypes))] - qs = ' '.join(qs) + #add column types + pkeys = [] + qtypes = [] + def add_column(col): + s = '{} {}'.format(col.db_field_name, col.db_type) + if col.primary_key: pkeys.append(col.db_field_name) + qtypes.append(s) + for name, col in model._columns.items(): + add_column(col) + + qtypes.append('PRIMARY KEY ({})'.format(', '.join(pkeys))) + + qs += ['({})'.format(', '.join(qtypes))] + qs = ' '.join(qs) - #add primary key - conn = get_connection(model.keyspace) - cur = conn.cursor() - try: cur.execute(qs) - except BaseException, e: - if 'Cannot add already existing column family' not in e.message: - raise + + indexes = [c for n,c in model._columns.items() if c.index] + if indexes: + import ipdb; ipdb.set_trace() + for column in indexes: + #TODO: check for existing index... + #can that be determined from the connection client? + qs = ['CREATE INDEX {}'.format(column.db_index_name)] + qs += ['ON {}'.format(cf_name)] + qs += ['({})'.format(column.db_field_name)] + qs = ' '.join(qs) + cur.execute(qs) def delete_column_family(model): - #TODO: check that model exists + #check that model exists + cf_name = model.column_family_name() conn = get_connection(model.keyspace) - cur = conn.cursor() - cur.execute('drop table {};'.format(model.column_family_name())) + ks_info = conn.client.describe_keyspace(model.keyspace) + if any([cf_name == cf.name for cf in ks_info.cf_defs]): + cur = conn.cursor() + cur.execute('drop table {};'.format(cf_name)) - pass diff --git a/cqlengine/models.py b/cqlengine/models.py index 7b2fa8ead1..862f8e340c 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -4,6 +4,8 @@ from cqlengine.exceptions import ModelException from cqlengine.query import QuerySet +class ModelDefinitionException(ModelException): pass + class BaseModel(object): """ The base model class, don't inherit from this, inherit from Model, defined below @@ -37,12 +39,12 @@ def column_family_name(cls): otherwise, it creates it from the module and class name """ 
if cls.db_name: - return cls.db_name + return cls.db_name.lower() cf_name = cls.__module__ + '.' + cls.__name__ cf_name = cf_name.replace('.', '_') #trim to less than 48 characters or cassandra will complain cf_name = cf_name[-48:] - return cf_name + return cf_name.lower() @property def pk(self): @@ -66,6 +68,7 @@ def save(self): is_new = self.pk is None self.validate() self.objects.save(self) + #delete any fields that have been deleted / set to none return self def delete(self): @@ -120,13 +123,19 @@ def _transform_column(col_name, col_obj): raise ModelException("{} defines the column {} more than once".format(name, v.db_field_name)) col_names.add(v.db_field_name) + #check for indexes on models with multiple primary keys + if len([1 for k,v in column_definitions if v.primary_key]) > 1: + if len([1 for k,v in column_definitions if v.index]) > 0: + raise ModelDefinitionException( + 'Indexes on models with multiple primary keys is not supported') + #get column family name cf_name = attrs.pop('db_name', name) #create db_name -> model name map for loading db_map = {} - for name, col in _columns.items(): - db_map[col.db_field_name] = name + for field_name, col in _columns.items(): + db_map[col.db_field_name] = field_name #add management members to the class attrs['_columns'] = _columns diff --git a/cqlengine/query.py b/cqlengine/query.py index 86837e93f9..83f78ef7f2 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -97,7 +97,7 @@ class EqualsOperator(QueryOperator): symbol = 'EQ' cql_symbol = '=' -class InOperator(QueryOperator): +class InOperator(EqualsOperator): symbol = 'IN' cql_symbol = 'IN' @@ -158,7 +158,11 @@ def __str__(self): def _validate_where_syntax(self): """ Checks that a filterset will not create invalid cql """ - #TODO: check that there's either a = or IN relationship with a primary key or indexed field + + #check that there's either a = or IN relationship with a primary key or indexed field + equal_ops = [w for w in self._where if 
isinstance(w, EqualsOperator)] + if not any([w.column.primary_key or w.column.index for w in equal_ops]): + raise QueryException('Where clauses require either a "=" or "IN" comparison with either a primary key or indexed field') #TODO: abuse this to see if we can get cql to raise an exception def _where_clause(self): diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index 45b83d3d5c..d2d88a0d98 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -89,6 +89,11 @@ def test_model_keyspace_attribute_must_be_a_string(self): Tests that users can't set the keyspace to None, or something else """ + def test_indexes_arent_allowed_on_models_with_multiple_primary_keys(self): + """ + Tests that attempting to define an index on a model with multiple primary keys fails + """ + def test_meta_data_is_not_inherited(self): """ Test that metadata defined in one class, is not inherited by subclasses diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index b59f9ab7ee..03fdb39674 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -12,6 +12,13 @@ class TestModel(Model): attempt_id = columns.Integer(primary_key=True) description = columns.Text() expected_result = columns.Integer() + test_result = columns.Integer() + +class IndexedTestModel(Model): + test_id = columns.Integer(primary_key=True) + attempt_id = columns.Integer(index=True) + description = columns.Text() + expected_result = columns.Integer() test_result = columns.Integer(index=True) class TestQuerySetOperation(BaseCassEngTestCase): @@ -104,14 +111,15 @@ def test_defining_only_or_defer_on_nonexistant_fields_fails(self): Tests that setting only or defer fields that don't exist raises an exception """ -class TestQuerySetUsage(BaseCassEngTestCase): +class BaseQuerySetUsage(BaseCassEngTestCase): @classmethod def 
setUpClass(cls): - super(TestQuerySetUsage, cls).setUpClass() - try: delete_column_family(TestModel) - except: pass + super(BaseQuerySetUsage, cls).setUpClass() + delete_column_family(TestModel) + delete_column_family(IndexedTestModel) create_column_family(TestModel) + create_column_family(IndexedTestModel) TestModel.objects.create(test_id=0, attempt_id=0, description='try1', expected_result=5, test_result=30) TestModel.objects.create(test_id=0, attempt_id=1, description='try2', expected_result=10, test_result=30) @@ -128,13 +136,32 @@ def setUpClass(cls): TestModel.objects.create(test_id=2, attempt_id=2, description='try11', expected_result=70, test_result=45) TestModel.objects.create(test_id=2, attempt_id=3, description='try12', expected_result=75, test_result=45) + IndexedTestModel.objects.create(test_id=0, attempt_id=0, description='try1', expected_result=5, test_result=30) + IndexedTestModel.objects.create(test_id=1, attempt_id=1, description='try2', expected_result=10, test_result=30) + IndexedTestModel.objects.create(test_id=2, attempt_id=2, description='try3', expected_result=15, test_result=30) + IndexedTestModel.objects.create(test_id=3, attempt_id=3, description='try4', expected_result=20, test_result=25) + + IndexedTestModel.objects.create(test_id=4, attempt_id=0, description='try5', expected_result=5, test_result=25) + IndexedTestModel.objects.create(test_id=5, attempt_id=1, description='try6', expected_result=10, test_result=25) + IndexedTestModel.objects.create(test_id=6, attempt_id=2, description='try7', expected_result=15, test_result=25) + IndexedTestModel.objects.create(test_id=7, attempt_id=3, description='try8', expected_result=20, test_result=20) + + IndexedTestModel.objects.create(test_id=8, attempt_id=0, description='try9', expected_result=50, test_result=40) + IndexedTestModel.objects.create(test_id=9, attempt_id=1, description='try10', expected_result=60, test_result=40) + IndexedTestModel.objects.create(test_id=10, attempt_id=2, 
description='try11', expected_result=70, test_result=45) + IndexedTestModel.objects.create(test_id=11, attempt_id=3, description='try12', expected_result=75, test_result=45) + @classmethod def tearDownClass(cls): - super(TestQuerySetUsage, cls).tearDownClass() - #TestModel.objects._delete_column_family() + super(BaseQuerySetUsage, cls).tearDownClass() delete_column_family(TestModel) + delete_column_family(IndexedTestModel) + +class TestQuerySetCountAndSelection(BaseQuerySetUsage): def test_count(self): + assert TestModel.objects.count() == 12 + q = TestModel.objects(test_id=0) assert q.count() == 4 @@ -157,6 +184,54 @@ def test_iteration(self): assert val in compare_set compare_set.remove(val) assert len(compare_set) == 0 + + def test_delete(self): + TestModel.objects.create(test_id=3, attempt_id=0, description='try9', expected_result=50, test_result=40) + TestModel.objects.create(test_id=3, attempt_id=1, description='try10', expected_result=60, test_result=40) + TestModel.objects.create(test_id=3, attempt_id=2, description='try11', expected_result=70, test_result=45) + TestModel.objects.create(test_id=3, attempt_id=3, description='try12', expected_result=75, test_result=45) + + assert TestModel.objects.count() == 16 + assert TestModel.objects(test_id=3).count() == 4 + + TestModel.objects(test_id=3).delete() + + assert TestModel.objects.count() == 12 + assert TestModel.objects(test_id=3).count() == 0 + +class TestQuerySetValidation(BaseQuerySetUsage): + + def test_primary_key_or_index_must_be_specified(self): + """ + Tests that queries that don't have an equals relation to a primary key or indexed field fail + """ + with self.assertRaises(query.QueryException): + q = TestModel.objects(test_result=25) + iter(q) + + def test_primary_key_or_index_must_have_equal_relation_filter(self): + """ + Tests that queries that don't have non equal (>,<, etc) relation to a primary key or indexed field fail + """ + with self.assertRaises(query.QueryException): + q = 
TestModel.objects(test_id__gt=0) + iter(q) + + + def test_indexed_field_can_be_queried(self): + """ + Tests that queries on an indexed field will work without any primary key relations specified + """ + q = IndexedTestModel.objects(test_result=25) + count = q.count() + assert q.count() == 4 + + + + + + + From aa6b151d068f17218d9e83279a9521f1c70bdccf Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 23 Nov 2012 16:18:14 -0800 Subject: [PATCH 0030/3726] removing the manager file --- cqlengine/manager.py | 65 -------------------------------------------- 1 file changed, 65 deletions(-) delete mode 100644 cqlengine/manager.py diff --git a/cqlengine/manager.py b/cqlengine/manager.py deleted file mode 100644 index b2c2ee0b52..0000000000 --- a/cqlengine/manager.py +++ /dev/null @@ -1,65 +0,0 @@ -#manager class - -from cqlengine.query import QuerySet - -#TODO: refactor this into the QuerySet -class Manager(object): - - def __init__(self, model): - super(Manager, self).__init__() - self.model = model - - @property - def column_family_name(self): - """ - Returns the column family name if it's been defined - otherwise, it creates it from the module and class name - """ - if self.model.db_name: - return self.model.db_name - cf_name = self.model.__module__ + '.' 
+ self.model.__name__ - cf_name = cf_name.replace('.', '_') - #trim to less than 48 characters or cassandra will complain - cf_name = cf_name[-48:] - return cf_name - - def __call__(self, **kwargs): - """ filter shortcut """ - return self.filter(**kwargs) - - def find(self, pk): - """ Returns the row corresponding to the primary key set given """ - #TODO: rework this to work with multiple primary keys - return QuerySet(self.model).find(pk) - - def all(self): - return QuerySet(self.model).all() - - def filter(self, **kwargs): - return QuerySet(self.model).filter(**kwargs) - - def create(self, **kwargs): - return self.model(**kwargs).save() - - def delete(self, **kwargs): - pass - - #----single instance methods---- - def _save_instance(self, instance): - """ - The business end of save, this is called by the models - save method and calls the Query save method. This should - only be called by the model saving itself - """ - QuerySet(self.model).save(instance) - - def _delete_instance(self, instance): - """ Deletes a single instance """ - QuerySet(self.model).delete_instance(instance) - - #----column family create/delete---- - def _create_column_family(self): - QuerySet(self.model)._create_column_family() - - def _delete_column_family(self): - QuerySet(self.model)._delete_column_family() From f75442456f8ebbfad3510cb1c42d4672671f677f Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 23 Nov 2012 17:40:15 -0800 Subject: [PATCH 0031/3726] cleaning up --- cqlengine/columns.py | 21 ++------------------- cqlengine/connection.py | 38 -------------------------------------- cqlengine/management.py | 1 - cqlengine/query.py | 6 +----- 4 files changed, 3 insertions(+), 63 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 9ce1bb6bf1..bf27a87460 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -221,24 +221,7 @@ def __init__(self, **kwargs): class Counter(BaseColumn): def __init__(self, **kwargs): - super(DateTime, 
self).__init__(**kwargs) - raise NotImplementedError - -#TODO: research supercolumns -#http://wiki.apache.org/cassandra/DataModel -#checkout composite columns: -#http://www.datastax.com/dev/blog/introduction-to-composite-columns-part-1 -class List(BaseColumn): - #checkout cql.cqltypes.ListType - def __init__(self, **kwargs): - super(DateTime, self).__init__(**kwargs) - raise NotImplementedError - -class Dict(BaseColumn): - #checkout cql.cqltypes.MapType - def __init__(self, **kwargs): - super(DateTime, self).__init__(**kwargs) + super(Counter, self).__init__(**kwargs) raise NotImplementedError -#checkout cql.cqltypes.SetType -#checkout cql.cqltypes.CompositeType +#TODO: Foreign key fields diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 19a767ac45..dc1a7467fb 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -24,12 +24,6 @@ def get_connection(keyspace, create_missing_keyspace=True): if create_missing_keyspace: from cqlengine.management import create_keyspace create_keyspace(keyspace) - #http://www.datastax.com/docs/1.0/references/cql/CREATE_KEYSPACE - cur = con.cursor() - cur.execute("""create keyspace {} - with strategy_class = 'SimpleStrategy' - and strategy_options:replication_factor=1;""".format(keyspace)) - con.set_initial_keyspace(keyspace) else: raise CQLConnectionError('"{}" is not an existing keyspace'.format(keyspace)) @@ -37,35 +31,3 @@ def get_connection(keyspace, create_missing_keyspace=True): return con -#cli examples here: -#http://wiki.apache.org/cassandra/CassandraCli -#http://www.datastax.com/docs/1.0/dml/using_cql -#http://stackoverflow.com/questions/10871595/how-do-you-create-a-counter-columnfamily-with-cql3-in-cassandra -""" -try: - cur.execute("create table colfam (id int PRIMARY KEY);") -except cql.ProgrammingError: - #cur.execute('drop table colfam;') - pass -""" - -#http://www.datastax.com/docs/1.0/references/cql/INSERT -#updates/inserts do the same thing - -""" -cur.execute('insert into colfam (id, 
content) values (:id, :content)', dict(id=1, content='yo!')) - -cur.execute('select * from colfam WHERE id=1;') -cur.fetchone() - -cur.execute("update colfam set content='hey' where id=1;") -cur.execute('select * from colfam WHERE id=1;') -cur.fetchone() - -cur.execute('delete from colfam WHERE id=1;') -cur.execute('delete from colfam WHERE id in (1);') -""" - -#TODO: Add alter altering existing schema functionality -#http://www.datastax.com/docs/1.0/references/cql/ALTER_COLUMNFAMILY - diff --git a/cqlengine/management.py b/cqlengine/management.py index 29fe02ba66..5b2ad6aa42 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -43,7 +43,6 @@ def add_column(col): indexes = [c for n,c in model._columns.items() if c.index] if indexes: - import ipdb; ipdb.set_trace() for column in indexes: #TODO: check for existing index... #can that be determined from the connection client? diff --git a/cqlengine/query.py b/cqlengine/query.py index 83f78ef7f2..ce6a859087 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -118,15 +118,11 @@ class LessThanOrEqualOperator(QueryOperator): cql_symbol = '<=' class QuerySet(object): - #TODO: querysets should be executed lazily #TODO: support specifying offset and limit (use slice) (maybe return a mutated queryset) #TODO: support specifying columns to exclude or select only + #TODO: support ORDER BY #TODO: cache results in this instance, but don't copy them on deepcopy - #CQL supports ==, >, >=, <, <=, IN (a,b,c,..n) - #REVERSE, LIMIT - #ORDER BY - def __init__(self, model): super(QuerySet, self).__init__() self.model = model From e0530a883ad500628c19bf5fd051e668a54bfd40 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 23 Nov 2012 21:19:46 -0800 Subject: [PATCH 0032/3726] removing the find method adding initial support for cached queryset results added a dirty fix for iterating over a queryset more than once --- cqlengine/models.py | 6 --- cqlengine/query.py | 52 ++++++++++++++------------ 
cqlengine/tests/model/test_model_io.py | 6 +-- cqlengine/tests/query/test_queryset.py | 23 ++++++++++++ 4 files changed, 55 insertions(+), 32 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 862f8e340c..9111b7596c 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -26,12 +26,6 @@ def __init__(self, **values): #TODO: note any deferred or only fields so they're not deleted - @classmethod - def find(cls, pk): - """ Loads a document by it's primary key """ - #TODO: rework this to work with multiple primary keys - cls.objects.find(pk) - @classmethod def column_family_name(cls): """ diff --git a/cqlengine/query.py b/cqlengine/query.py index ce6a859087..8ee3d6653d 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -142,6 +142,9 @@ def __init__(self, model): self._defer_fields = [] self._only_fields = [] + #results cache + self._result_cache = None + self._cursor = None def __unicode__(self): @@ -150,6 +153,22 @@ def __unicode__(self): def __str__(self): return str(self.__unicode__()) + def __call__(self, **kwargs): + return self.filter(**kwargs) + + def __deepcopy__(self, memo): + clone = self.__class__(self.model) + for k,v in self.__dict__.items(): + if k in ['_result_cache']: + clone.__dict__[k] = None + else: + clone.__dict__[k] = copy.deepcopy(v, memo) + + return clone + + def __len__(self): + return self.count() + #----query generation / execution---- def _validate_where_syntax(self): @@ -205,6 +224,7 @@ def _select_query(self, count=False): #----Reads------ def __iter__(self): + #TODO: cache results if self._cursor is None: conn = get_connection(self.model.keyspace) self._cursor = conn.cursor() @@ -234,11 +254,14 @@ def _get_next(self): def next(self): instance = self._get_next() - if instance is None: raise StopIteration + if instance is None: + #TODO: this is inefficient, we should be caching the results + self._cursor = None + raise StopIteration return instance def first(self): - pass + return iter(self)._get_next() 
def all(self): clone = copy.deepcopy(self) @@ -278,11 +301,9 @@ def filter(self, **kwargs): return clone - def __call__(self, **kwargs): - return self.filter(**kwargs) - def count(self): """ Returns the number of rows matched by this query """ + #TODO: check for previous query execution and return row count if it exists con = get_connection(self.model.keyspace) cur = con.cursor() cur.execute(self._select_query(count=True), self._where_values()) @@ -291,20 +312,6 @@ def count(self): def __len__(self): return self.count() - def find(self, pk): - """ - loads one document identified by it's primary key - """ - #TODO: make this a convenience wrapper of the filter method - #TODO: rework this to work with multiple primary keys - qs = 'SELECT * FROM {column_family} WHERE {pk_name}=:{pk_name}' - qs = qs.format(column_family=self.column_family_name, - pk_name=self.model._pk_name) - conn = get_connection(self.model.keyspace) - self._cursor = conn.cursor() - self._cursor.execute(qs, {self.model._pk_name:pk}) - return self._get_next() - def _only_or_defer(self, action, fields): clone = copy.deepcopy(self) if clone._defer_fields or clone._only_fields: @@ -313,7 +320,9 @@ def _only_or_defer(self, action, fields): #check for strange fields missing_fields = [f for f in fields if f not in self.model._columns.keys()] if missing_fields: - raise QueryException("Can't resolve fields {} in {}".format(', '.join(missing_fields), self.model.__name__)) + raise QueryException( + "Can't resolve fields {} in {}".format( + ', '.join(missing_fields), self.model.__name__)) if action == 'defer': clone._defer_fields = fields @@ -378,12 +387,9 @@ def delete(self, columns=[]): qs += ['WHERE {}'.format(self._where_clause())] qs = ' '.join(qs) - #TODO: Return number of rows deleted con = get_connection(self.model.keyspace) cur = con.cursor() cur.execute(qs, self._where_values()) - return cur.fetchone() - def delete_instance(self, instance): diff --git a/cqlengine/tests/model/test_model_io.py 
b/cqlengine/tests/model/test_model_io.py index fe558eb936..f02a61ec43 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -30,7 +30,7 @@ def test_model_save_and_load(self): Tests that models can be saved and retrieved """ tm = TestModel.objects.create(count=8, text='123456789') - tm2 = TestModel.objects.find(tm.pk) + tm2 = TestModel.objects(id=tm.pk).first() for cname in tm._columns.keys(): self.assertEquals(getattr(tm, cname), getattr(tm2, cname)) @@ -44,7 +44,7 @@ def test_model_updating_works_properly(self): tm.count = 100 tm.save() - tm2 = TestModel.objects.find(tm.pk) + tm2 = TestModel.objects(id=tm.pk).first() self.assertEquals(tm.count, tm2.count) def test_model_deleting_works_properly(self): @@ -53,7 +53,7 @@ def test_model_deleting_works_properly(self): """ tm = TestModel.objects.create(count=8, text='123456789') tm.delete() - tm2 = TestModel.objects.find(tm.pk) + tm2 = TestModel.objects(id=tm.pk).first() self.assertIsNone(tm2) def test_nullable_columns_are_saved_properly(self): diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 03fdb39674..8d91d2da60 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -199,6 +199,29 @@ def test_delete(self): assert TestModel.objects.count() == 12 assert TestModel.objects(test_id=3).count() == 0 +class TestQuerySetIterator(BaseQuerySetUsage): + + def test_multiple_iterations_work_properly(self): + """ Tests that iterating over a query set more than once works """ + q = TestModel.objects(test_id=0) + #tuple of expected attempt_id, expected_result values + compare_set = set([(0,5), (1,10), (2,15), (3,20)]) + for t in q: + val = t.attempt_id, t.expected_result + assert val in compare_set + compare_set.remove(val) + assert len(compare_set) == 0 + + #try it again + compare_set = set([(0,5), (1,10), (2,15), (3,20)]) + for t in q: + val = t.attempt_id, t.expected_result + assert val in 
compare_set + compare_set.remove(val) + assert len(compare_set) == 0 + + + class TestQuerySetValidation(BaseQuerySetUsage): def test_primary_key_or_index_must_be_specified(self): From 854de8d375ae25168b7da633b631dbf5a68d8bd3 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 23 Nov 2012 21:33:43 -0800 Subject: [PATCH 0033/3726] adding some todos --- cqlengine/columns.py | 17 +++++++++++------ cqlengine/query.py | 7 +++---- cqlengine/tests/model/test_model_io.py | 2 -- 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index bf27a87460..a1ba0a3c57 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -55,6 +55,8 @@ def __init__(self, primary_key=False, index=False, db_field=None, default=None, self.index = index self.db_field = db_field self.default = default + + #TODO: make this required, since fields won't actually be nulled, but deleted self.null = null #the column name in the model definition @@ -116,10 +118,7 @@ def get_column_def(self): """ Returns a column definition for CQL table definition """ - dterms = [self.db_field_name, self.db_type] - #if self.primary_key: - #dterms.append('PRIMARY KEY') - return ' '.join(dterms) + return '{} {}'.format(self.db_field_name, self.db_type) def set_column_name(self, name): """ @@ -214,14 +213,20 @@ def to_database(self, value): class Decimal(BaseColumn): db_type = 'decimal' - #TODO: this + #TODO: decimal field def __init__(self, **kwargs): super(DateTime, self).__init__(**kwargs) raise NotImplementedError class Counter(BaseColumn): + #TODO: counter field def __init__(self, **kwargs): super(Counter, self).__init__(**kwargs) raise NotImplementedError -#TODO: Foreign key fields +class ForeignKey(BaseColumn): + #TODO: Foreign key field + def __init__(self, **kwargs): + super(ForeignKey, self).__init__(**kwargs) + raise NotImplementedError + diff --git a/cqlengine/query.py b/cqlengine/query.py index 8ee3d6653d..c9195c9465 100644 --- a/cqlengine/query.py 
+++ b/cqlengine/query.py @@ -17,8 +17,7 @@ class QueryOperator(object): # ie: colname__ symbol = None - # The comparator symbol this operator - # uses in cql + # The comparator symbol this operator uses in cql cql_symbol = None def __init__(self, column, value): @@ -372,6 +371,8 @@ def save(self, instance): cur = conn.cursor() cur.execute(qs, field_values) + #TODO: delete deleted / nulled fields + def create(self, **kwargs): return self.model(**kwargs).save() @@ -379,8 +380,6 @@ def create(self, **kwargs): def delete(self, columns=[]): """ Deletes the contents of a query - - :returns: number of rows deleted """ qs = ['DELETE FROM {}'.format(self.column_family_name)] if self._where: diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index f02a61ec43..cf43f96dc0 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -11,8 +11,6 @@ class TestModel(Model): count = columns.Integer() text = columns.Text() -#class TestModel2(Model): - class TestModelIO(BaseCassEngTestCase): @classmethod From 394266ab983e53c7ce579072fa033ac2e95c97e5 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 24 Nov 2012 10:28:52 -0800 Subject: [PATCH 0034/3726] adding support for order_by --- cqlengine/query.py | 81 ++++++++++++++++++- .../tests/model/test_class_construction.py | 5 ++ cqlengine/tests/query/test_queryset.py | 28 +++++++ 3 files changed, 111 insertions(+), 3 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index c9195c9465..46891787a5 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -119,7 +119,6 @@ class LessThanOrEqualOperator(QueryOperator): class QuerySet(object): #TODO: support specifying offset and limit (use slice) (maybe return a mutated queryset) #TODO: support specifying columns to exclude or select only - #TODO: support ORDER BY #TODO: cache results in this instance, but don't copy them on deepcopy def __init__(self, model): @@ -131,7 +130,7 @@ def 
__init__(self, model): self._where = [] #ordering arguments - self._order = [] + self._order = None #subset selection self._limit = None @@ -215,7 +214,9 @@ def _select_query(self, count=False): qs += ['WHERE {}'.format(self._where_clause())] if not count: - #TODO: add support for limit, start, order by, and reverse + if self._order: + qs += ['ORDER BY {}'.format(self._order)] + #TODO: add support for limit, start, and reverse pass return ' '.join(qs) @@ -231,6 +232,14 @@ def __iter__(self): self._rowcount = self._cursor.rowcount return self + def __getitem__(self, s): + + if isinstance(s, slice): + pass + else: + s = long(s) + pass + def _construct_instance(self, values): #translate column names to model names field_dict = {} @@ -300,6 +309,39 @@ def filter(self, **kwargs): return clone + def order_by(self, colname): + """ + orders the result set. + ordering can only select one column, and it must be the second column in a composite primary key + + Default order is ascending, prepend a '-' to the column name for descending + """ + if colname is None: + clone = copy.deepcopy(self) + clone._order = None + return clone + + order_type = 'DESC' if colname.startswith('-') else 'ASC' + colname = colname.replace('-', '') + + column = self.model._columns.get(colname) + if column is None: + raise QueryException("Can't resolve the column name: '{}'".format(colname)) + + #validate the column selection + if not column.primary_key: + raise QueryException( + "Can't order on '{}', can only order on (clustered) primary keys".format(colname)) + + pks = [v for k,v in self.model._columns.items() if v.primary_key] + if column == pks[0]: + raise QueryException( + "Can't order by the first primary key, clustering (secondary) keys only") + + clone = copy.deepcopy(self) + clone._order = '{} {}'.format(column.db_field_name, order_type) + return clone + def count(self): """ Returns the number of rows matched by this query """ #TODO: check for previous query execution and return row count if 
it exists @@ -311,6 +353,39 @@ def count(self): def __len__(self): return self.count() +# def set_limits(self, start, limit): +# """ Sets the start and limit parameters """ +# #validate +# if not (start is None or isinstance(start, (int, long))): +# raise QueryException("'start' must be None, or an int or long") +# if not (limit is None or isinstance(limit, (int, long))): +# raise QueryException("'limit' must be None, or an int or long") +# +# clone = copy.deepcopy(self) +# clone._start = start +# clone._limit = limit +# return clone +# +# def start(self, v): +# """ Sets the limit """ +# if not (v is None or isinstance(v, (int, long))): +# raise TypeError +# if v == self._start: +# return self +# clone = copy.deepcopy(self) +# clone._start = v +# return clone +# +# def limit(self, v): +# """ Sets the limit """ +# if not (v is None or isinstance(v, (int, long))): +# raise TypeError +# if v == self._limit: +# return self +# clone = copy.deepcopy(self) +# clone._limit = v +# return clone + def _only_or_defer(self, action, fields): clone = copy.deepcopy(self) if clone._defer_fields or clone._only_fields: diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index d2d88a0d98..fd218e1a5c 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -84,6 +84,11 @@ def test_normal_fields_can_be_defined_between_primary_keys(self): Tests tha non primary key fields can be defined between primary key fields """ + def test_at_least_one_non_primary_key_column_is_required(self): + """ + Tests that an error is raised if a model doesn't contain at least one primary key field + """ + def test_model_keyspace_attribute_must_be_a_string(self): """ Tests that users can't set the keyspace to None, or something else diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 8d91d2da60..256e681697 100644 --- 
a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -220,7 +220,35 @@ def test_multiple_iterations_work_properly(self): compare_set.remove(val) assert len(compare_set) == 0 +class TestQuerySetOrdering(BaseQuerySetUsage): + def test_order_by_success_case(self): + + q = TestModel.objects(test_id=0).order_by('attempt_id') + expected_order = [0,1,2,3] + for model, expect in zip(q,expected_order): + assert model.attempt_id == expect + + q = q.order_by('-attempt_id') + expected_order.reverse() + for model, expect in zip(q,expected_order): + assert model.attempt_id == expect + + def test_ordering_by_non_second_primary_keys_fail(self): + + with self.assertRaises(query.QueryException): + q = TestModel.objects(test_id=0).order_by('test_id') + + def test_ordering_by_non_primary_keys_fails(self): + with self.assertRaises(query.QueryException): + q = TestModel.objects(test_id=0).order_by('description') + + def test_ordering_on_indexed_columns_fails(self): + with self.assertRaises(query.QueryException): + q = IndexedTestModel.objects(test_id=0).order_by('attempt_id') + +class TestQuerySetSlicing(BaseQuerySetUsage): + pass class TestQuerySetValidation(BaseQuerySetUsage): From 301aa5c1dfb22a4009a7fa7e1a48e4d7e59d69dd Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 24 Nov 2012 16:58:03 -0800 Subject: [PATCH 0035/3726] adding support for select LIMIT --- cqlengine/query.py | 108 ++++++++++++++++++++------------------------- 1 file changed, 47 insertions(+), 61 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 46891787a5..7efd1929a9 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -117,7 +117,7 @@ class LessThanOrEqualOperator(QueryOperator): cql_symbol = '<=' class QuerySet(object): - #TODO: support specifying offset and limit (use slice) (maybe return a mutated queryset) + #TODO: delete empty columns on save #TODO: support specifying columns to exclude or select only #TODO: cache results in this 
instance, but don't copy them on deepcopy @@ -132,9 +132,9 @@ def __init__(self, model): #ordering arguments self._order = None - #subset selection - self._limit = None - self._start = None + #CQL has a default limit of 10000, it's defined here + #because explicit is better than implicit + self._limit = 10000 #see the defer and only methods self._defer_fields = [] @@ -190,34 +190,28 @@ def _where_values(self): values.update(where.get_dict()) return values - def _select_query(self, count=False): + def _select_query(self): """ Returns a select clause based on the given filter args - - :param count: indicates this should return a count query only """ - qs = [] - if count: - qs += ['SELECT COUNT(*)'] - else: - fields = self.model._columns.keys() - if self._defer_fields: - fields = [f for f in fields if f not in self._defer_fields] - elif self._only_fields: - fields = [f for f in fields if f in self._only_fields] - db_fields = [self.model._columns[f].db_field_name for f in fields] - qs += ['SELECT {}'.format(', '.join(db_fields))] - + fields = self.model._columns.keys() + if self._defer_fields: + fields = [f for f in fields if f not in self._defer_fields] + elif self._only_fields: + fields = [f for f in fields if f in self._only_fields] + db_fields = [self.model._columns[f].db_field_name for f in fields] + + qs = ['SELECT {}'.format(', '.join(db_fields))] qs += ['FROM {}'.format(self.column_family_name)] if self._where: qs += ['WHERE {}'.format(self._where_clause())] - if not count: - if self._order: - qs += ['ORDER BY {}'.format(self._order)] - #TODO: add support for limit, start, and reverse - pass + if self._order: + qs += ['ORDER BY {}'.format(self._order)] + + if self._limit: + qs += ['LIMIT {}'.format(self._limit)] return ' '.join(qs) @@ -235,10 +229,15 @@ def __iter__(self): def __getitem__(self, s): if isinstance(s, slice): - pass + #return a new query with limit defined + #start and step are not supported + if s.start: raise QueryException('CQL does not support 
START') + if s.step: raise QueryException('step is not supported') + return self.limit(s.stop) else: + #return the object at this index s = long(s) - pass + raise NotImplementedError def _construct_instance(self, values): #translate column names to model names @@ -345,46 +344,33 @@ def order_by(self, colname): def count(self): """ Returns the number of rows matched by this query """ #TODO: check for previous query execution and return row count if it exists + qs = ['SELECT COUNT(*)'] + qs += ['FROM {}'.format(self.column_family_name)] + if self._where: + qs += ['WHERE {}'.format(self._where_clause())] + qs = ' '.join(qs) + con = get_connection(self.model.keyspace) cur = con.cursor() - cur.execute(self._select_query(count=True), self._where_values()) + cur.execute(qs, self._where_values()) return cur.fetchone()[0] - def __len__(self): - return self.count() + def limit(self, v): + """ + Sets the limit on the number of results returned + CQL has a default limit of 10,000 + """ + if not (v is None or isinstance(v, (int, long))): + raise TypeError + if v == self._limit: + return self + + if v < 0: + raise QueryException("Negative limit is not allowed") -# def set_limits(self, start, limit): -# """ Sets the start and limit parameters """ -# #validate -# if not (start is None or isinstance(start, (int, long))): -# raise QueryException("'start' must be None, or an int or long") -# if not (limit is None or isinstance(limit, (int, long))): -# raise QueryException("'limit' must be None, or an int or long") -# -# clone = copy.deepcopy(self) -# clone._start = start -# clone._limit = limit -# return clone -# -# def start(self, v): -# """ Sets the limit """ -# if not (v is None or isinstance(v, (int, long))): -# raise TypeError -# if v == self._start: -# return self -# clone = copy.deepcopy(self) -# clone._start = v -# return clone -# -# def limit(self, v): -# """ Sets the limit """ -# if not (v is None or isinstance(v, (int, long))): -# raise TypeError -# if v == self._limit: -# 
return self -# clone = copy.deepcopy(self) -# clone._limit = v -# return clone + clone = copy.deepcopy(self) + clone._limit = v + return clone def _only_or_defer(self, action, fields): clone = copy.deepcopy(self) From 2cbf582b0dfc3d05c8494506c54386efd68f1832 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 24 Nov 2012 17:33:14 -0800 Subject: [PATCH 0036/3726] adding column delete on save --- cqlengine/columns.py | 1 + cqlengine/models.py | 18 +++++++++++------- cqlengine/query.py | 20 +++++++++++++++++++- cqlengine/tests/model/test_model_io.py | 7 +------ cqlengine/tests/query/test_queryset.py | 10 ---------- 5 files changed, 32 insertions(+), 24 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index a1ba0a3c57..9236d44e2e 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -12,6 +12,7 @@ def __init__(self, instance, column, value): self.initial_value = value self.value = value + @property def deleted(self): return self.value is None and self.initial_value is not None diff --git a/cqlengine/models.py b/cqlengine/models.py index 9111b7596c..3893bb9e0a 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -24,7 +24,7 @@ def __init__(self, **values): value_mngr = column.value_manager(self, column, values.get(name, None)) self._values[name] = value_mngr - #TODO: note any deferred or only fields so they're not deleted + #TODO: note any absent fields so they're not deleted @classmethod def column_family_name(cls): @@ -75,13 +75,16 @@ class ModelMetaClass(type): def __new__(cls, name, bases, attrs): """ """ - #move column definitions into _columns dict + #move column definitions into columns dict #and set default column names - _columns = OrderedDict() + columns = OrderedDict() + primary_keys = OrderedDict() pk_name = None def _transform_column(col_name, col_obj): - _columns[col_name] = col_obj + columns[col_name] = col_obj + if col_obj.primary_key: + primary_keys[col_name] = col_obj col_obj.set_column_name(col_name) #set 
properties _get = lambda self: self._values[col_name].getval() @@ -112,7 +115,7 @@ def _transform_column(col_name, col_obj): #check for duplicate column names col_names = set() - for k,v in _columns.items(): + for v in columns.values(): if v.db_field_name in col_names: raise ModelException("{} defines the column {} more than once".format(name, v.db_field_name)) col_names.add(v.db_field_name) @@ -128,11 +131,12 @@ def _transform_column(col_name, col_obj): #create db_name -> model name map for loading db_map = {} - for field_name, col in _columns.items(): + for field_name, col in columns.items(): db_map[col.db_field_name] = field_name #add management members to the class - attrs['_columns'] = _columns + attrs['_columns'] = columns + attrs['_primary_keys'] = primary_keys attrs['_db_map'] = db_map attrs['_pk_name'] = pk_name attrs['_dynamic_columns'] = {} diff --git a/cqlengine/query.py b/cqlengine/query.py index 7efd1929a9..e6cccc9fc7 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -432,7 +432,25 @@ def save(self, instance): cur = conn.cursor() cur.execute(qs, field_values) - #TODO: delete deleted / nulled fields + #TODO: delete deleted / nulled columns + deleted = [k for k,v in instance._values.items() if v.deleted] + if deleted: + import ipdb; ipdb.set_trace() + del_fields = [self.model._columns[f] for f in deleted] + del_fields = [f.db_field_name for f in del_fields if not f.primary_key] + pks = self.model._primary_keys + qs = ['DELETE {}'.format(', '.join(del_fields))] + qs += ['FROM {}'.format(self.column_family_name)] + qs += ['WHERE'] + eq = lambda col: '{0} = :{0}'.format(v.db_field_name) + qs += [' AND '.join([eq(f) for f in pks.values()])] + qs = ' '.join(qs) + + pk_dict = dict([(v.db_field_name, getattr(instance, k)) for k,v in pks.items()]) + cur.execute(qs, pk_dict) + + + def create(self, **kwargs): return self.model(**kwargs).save() diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index 
cf43f96dc0..09a7280979 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -10,7 +10,7 @@ class TestModel(Model): count = columns.Integer() text = columns.Text() - + class TestModelIO(BaseCassEngTestCase): @classmethod @@ -54,11 +54,6 @@ def test_model_deleting_works_properly(self): tm2 = TestModel.objects(id=tm.pk).first() self.assertIsNone(tm2) - def test_nullable_columns_are_saved_properly(self): - """ - Tests that nullable columns save without any trouble - """ - def test_column_deleting_works_properly(self): """ """ diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 256e681697..b3c1449ad8 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -78,16 +78,6 @@ def test_queryset_is_immutable(self): query2 = query1.filter(expected_result__gte=1) assert len(query2._where) == 2 - def test_queryset_slicing(self): - """ - Check that the limit and start is implemented as iterator slices - """ - - def test_proper_delete_behavior(self): - """ - Tests that deleting the contents of a queryset works properly - """ - def test_the_all_method_clears_where_filter(self): """ Tests that calling all on a queryset with previously defined filters returns a queryset with no filters From 67dd9151a9abbde3aca24390eb5fceee2bf5b89e Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 24 Nov 2012 17:37:57 -0800 Subject: [PATCH 0037/3726] fixing name conflict --- cqlengine/models.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 3893bb9e0a..f51c5dd27e 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -77,12 +77,12 @@ def __new__(cls, name, bases, attrs): """ #move column definitions into columns dict #and set default column names - columns = OrderedDict() + column_dict = OrderedDict() primary_keys = OrderedDict() pk_name = None def _transform_column(col_name, 
col_obj): - columns[col_name] = col_obj + column_dict[col_name] = col_obj if col_obj.primary_key: primary_keys[col_name] = col_obj col_obj.set_column_name(col_name) @@ -115,7 +115,7 @@ def _transform_column(col_name, col_obj): #check for duplicate column names col_names = set() - for v in columns.values(): + for v in column_dict.values(): if v.db_field_name in col_names: raise ModelException("{} defines the column {} more than once".format(name, v.db_field_name)) col_names.add(v.db_field_name) @@ -131,11 +131,11 @@ def _transform_column(col_name, col_obj): #create db_name -> model name map for loading db_map = {} - for field_name, col in columns.items(): + for field_name, col in column_dict.items(): db_map[col.db_field_name] = field_name #add management members to the class - attrs['_columns'] = columns + attrs['_columns'] = column_dict attrs['_primary_keys'] = primary_keys attrs['_db_map'] = db_map attrs['_pk_name'] = pk_name From 405a0240ad651b3d5967028f27073a0b9fa8f6b2 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 24 Nov 2012 17:38:19 -0800 Subject: [PATCH 0038/3726] refactoring column null arg to required --- cqlengine/columns.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 9236d44e2e..b62666001f 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -43,22 +43,20 @@ class BaseColumn(object): instance_counter = 0 - def __init__(self, primary_key=False, index=False, db_field=None, default=None, null=False): + def __init__(self, primary_key=False, index=False, db_field=None, default=None, required=True): """ :param primary_key: bool flag, indicates this column is a primary key. 
The first primary key defined on a model is the partition key, all others are cluster keys :param index: bool flag, indicates an index should be created for this column :param db_field: the fieldname this field will map to in the database :param default: the default value, can be a value or a callable (no args) - :param null: boolean, is the field nullable? + :param required: boolean, is the field required? """ self.primary_key = primary_key self.index = index self.db_field = db_field self.default = default - - #TODO: make this required, since fields won't actually be nulled, but deleted - self.null = null + self.required = required #the column name in the model definition self.column_name = None @@ -77,8 +75,8 @@ def validate(self, value): if value is None: if self.has_default: return self.get_default() - elif not self.null: - raise ValidationError('null values are not allowed') + elif self.required: + raise ValidationError('{} - None values are not allowed'.format(self.column_name or self.db_field)) return value def to_python(self, value): From 823e21fdee28b8b1fa284fcd475b98a48d179520 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 24 Nov 2012 17:48:40 -0800 Subject: [PATCH 0039/3726] testing and debugging column deleting --- cqlengine/query.py | 7 ++++--- cqlengine/tests/model/test_model_io.py | 10 +++++++++- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index e6cccc9fc7..a62b172d18 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -417,7 +417,9 @@ def save(self, instance): #get defined fields and their column names for name, col in self.model._columns.items(): - value_pairs += [(col.db_field_name, values.get(name))] + val = values.get(name) + if val is None: continue + value_pairs += [(col.db_field_name, val)] #construct query string field_names = zip(*value_pairs)[0] @@ -435,14 +437,13 @@ def save(self, instance): #TODO: delete deleted / nulled columns deleted = [k for k,v in 
instance._values.items() if v.deleted] if deleted: - import ipdb; ipdb.set_trace() del_fields = [self.model._columns[f] for f in deleted] del_fields = [f.db_field_name for f in del_fields if not f.primary_key] pks = self.model._primary_keys qs = ['DELETE {}'.format(', '.join(del_fields))] qs += ['FROM {}'.format(self.column_family_name)] qs += ['WHERE'] - eq = lambda col: '{0} = :{0}'.format(v.db_field_name) + eq = lambda col: '{0} = :{0}'.format(v.column.db_field_name) qs += [' AND '.join([eq(f) for f in pks.values()])] qs = ' '.join(qs) diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index 09a7280979..f408e46ce9 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -9,7 +9,7 @@ class TestModel(Model): count = columns.Integer() - text = columns.Text() + text = columns.Text(required=False) class TestModelIO(BaseCassEngTestCase): @@ -57,4 +57,12 @@ def test_model_deleting_works_properly(self): def test_column_deleting_works_properly(self): """ """ + tm = TestModel.objects.create(count=8, text='123456789') + tm.text = None + tm.save() + + tm2 = TestModel.objects(id=tm.pk).first() + assert tm2.text is None + assert tm2._values['text'].initial_value is None + From 2db6426d0278b24b751b9271b2f92eb4199de383 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 24 Nov 2012 23:11:09 -0800 Subject: [PATCH 0040/3726] added fault tolerant connection manager, which will be easily extended to support connection pooling added support for multiple connections removed dependence on setting keyspace name on opening connection --- cqlengine/connection.py | 103 +++++++++++++++++- cqlengine/management.py | 92 ++++++++-------- cqlengine/models.py | 8 +- cqlengine/query.py | 33 +++--- cqlengine/tests/base.py | 7 ++ cqlengine/tests/management/__init__.py | 0 cqlengine/tests/management/test_management.py | 0 7 files changed, 169 insertions(+), 74 deletions(-) create mode 100644 
cqlengine/tests/management/__init__.py create mode 100644 cqlengine/tests/management/test_management.py diff --git a/cqlengine/connection.py b/cqlengine/connection.py index dc1a7467fb..6cf5c1ff4a 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -2,17 +2,67 @@ #http://code.google.com/a/apache-extras.org/p/cassandra-dbapi2 / #http://cassandra.apache.org/doc/cql/CQL.html +from collections import namedtuple +import random + import cql + from cqlengine.exceptions import CQLEngineException +from thrift.transport.TTransport import TTransportException + + class CQLConnectionError(CQLEngineException): pass -_keyspace = 'cassengine_test' +Host = namedtuple('Host', ['name', 'port']) +_hosts = [] +_host_idx = 0 +_conn= None +_username = None +_password = None + +def _set_conn(host): + """ + """ + global _conn + _conn = cql.connect(host.name, host.port, user=_username, password=_password) + _conn.set_cql_version('3.0.0') + +def setup(hosts, username=None, password=None): + """ + Records the hosts and connects to one of them + + :param hosts: list of hosts, strings in the :, or just + """ + global _hosts + global _username + global _password + + _username = username + _password = password + + for host in hosts: + host = host.strip() + host = host.split(':') + if len(host) == 1: + _hosts.append(Host(host[0], 9160)) + elif len(host) == 2: + _hosts.append(Host(*host)) + else: + raise CQLConnectionError("Can't parse {}".format(''.join(host))) + + if not _hosts: + raise CQLConnectionError("At least one host required") + + random.shuffle(_hosts) + host = _hosts[_host_idx] + _set_conn(host) + #TODO: look into the cql connection pool class -_conn = {} -def get_connection(keyspace, create_missing_keyspace=True): - con = _conn.get(keyspace) +_old_conn = {} +def get_connection(keyspace=None, create_missing_keyspace=True): + con = _old_conn.get(keyspace) if con is None: con = cql.connect('127.0.0.1', 9160) con.set_cql_version('3.0.0') @@ -27,7 +77,50 @@ def 
get_connection(keyspace, create_missing_keyspace=True): else: raise CQLConnectionError('"{}" is not an existing keyspace'.format(keyspace)) - _conn[keyspace] = con + _old_conn[keyspace] = con return con +class connection_manager(object): + """ + Connection failure tolerant connection manager. Written to be used in a 'with' block for connection pooling + """ + def __init__(self): + if not _hosts: + raise CQLConnectionError("No connections have been configured, call cqlengine.connection.setup") + self.keyspace = None + self.con = _conn + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + pass + + def execute(self, query, params={}): + """ + Gets a connection from the pool and executes the given query, returns the cursor + + if there's a connection problem, this will silently create a new connection pool + from the available hosts, and remove the problematic host from the host list + """ + global _host_idx + + for i in range(len(_hosts)): + try: + cur = self.con.cursor() + cur.execute(query, params) + return cur + except TTransportException: + #TODO: check for other errors raised in the event of a connection / server problem + #move to the next connection and set the connection pool + self.con_pool.return_connection(self.con) + self.con = None + _host_idx += 1 + _host_idx %= len(_hosts) + host = _hosts[_host_idx] + _set_conn(host) + self.con = _conn + + raise CQLConnectionError("couldn't reach a Cassandra server") + diff --git a/cqlengine/management.py b/cqlengine/management.py index 5b2ad6aa42..ac967059b1 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -1,64 +1,62 @@ from cqlengine.connection import get_connection +from cqlengine.connection import connection_manager def create_keyspace(name): - con = get_connection(None) - cur = con.cursor() - cur.execute("""CREATE KEYSPACE {} - WITH strategy_class = 'SimpleStrategy' - AND strategy_options:replication_factor=1;""".format(name)) + with connection_manager() as 
con: + con.execute("""CREATE KEYSPACE {} + WITH strategy_class = 'SimpleStrategy' + AND strategy_options:replication_factor=1;""".format(name)) def delete_keyspace(name): - con = get_connection(None) - cur = con.cursor() - cur.execute("DROP KEYSPACE {}".format(name)) + with connection_manager() as con: + con.execute("DROP KEYSPACE {}".format(name)) def create_column_family(model): #construct query string cf_name = model.column_family_name() + raw_cf_name = model.column_family_name(include_keyspace=False) + + with connection_manager() as con: + #check for an existing column family + ks_info = con.con.client.describe_keyspace(model.keyspace) + if not any([raw_cf_name == cf.name for cf in ks_info.cf_defs]): + qs = ['CREATE TABLE {}'.format(cf_name)] + + #add column types + pkeys = [] + qtypes = [] + def add_column(col): + s = '{} {}'.format(col.db_field_name, col.db_type) + if col.primary_key: pkeys.append(col.db_field_name) + qtypes.append(s) + for name, col in model._columns.items(): + add_column(col) + + qtypes.append('PRIMARY KEY ({})'.format(', '.join(pkeys))) + + qs += ['({})'.format(', '.join(qtypes))] + qs = ' '.join(qs) - conn = get_connection(model.keyspace) - cur = conn.cursor() - - #check for an existing column family - ks_info = conn.client.describe_keyspace(model.keyspace) - if not any([cf_name == cf.name for cf in ks_info.cf_defs]): - qs = ['CREATE TABLE {}'.format(cf_name)] - - #add column types - pkeys = [] - qtypes = [] - def add_column(col): - s = '{} {}'.format(col.db_field_name, col.db_type) - if col.primary_key: pkeys.append(col.db_field_name) - qtypes.append(s) - for name, col in model._columns.items(): - add_column(col) - - qtypes.append('PRIMARY KEY ({})'.format(', '.join(pkeys))) - - qs += ['({})'.format(', '.join(qtypes))] - qs = ' '.join(qs) - - cur.execute(qs) + con.execute(qs) - indexes = [c for n,c in model._columns.items() if c.index] - if indexes: - for column in indexes: - #TODO: check for existing index... 
- #can that be determined from the connection client? - qs = ['CREATE INDEX {}'.format(column.db_index_name)] - qs += ['ON {}'.format(cf_name)] - qs += ['({})'.format(column.db_field_name)] - qs = ' '.join(qs) - cur.execute(qs) + indexes = [c for n,c in model._columns.items() if c.index] + if indexes: + for column in indexes: + #TODO: check for existing index... + #can that be determined from the connection client? + qs = ['CREATE INDEX {}'.format(column.db_index_name)] + qs += ['ON {}'.format(cf_name)] + qs += ['({})'.format(column.db_field_name)] + qs = ' '.join(qs) + con.execute(qs) def delete_column_family(model): #check that model exists cf_name = model.column_family_name() - conn = get_connection(model.keyspace) - ks_info = conn.client.describe_keyspace(model.keyspace) - if any([cf_name == cf.name for cf in ks_info.cf_defs]): - cur = conn.cursor() - cur.execute('drop table {};'.format(cf_name)) + raw_cf_name = model.column_family_name(include_keyspace=False) + with connection_manager() as con: + ks_info = con.con.client.describe_keyspace(model.keyspace) + if any([raw_cf_name == cf.name for cf in ks_info.cf_defs]): + con.execute('drop table {};'.format(cf_name)) diff --git a/cqlengine/models.py b/cqlengine/models.py index f51c5dd27e..b17df09fb3 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -24,10 +24,8 @@ def __init__(self, **values): value_mngr = column.value_manager(self, column, values.get(name, None)) self._values[name] = value_mngr - #TODO: note any absent fields so they're not deleted - @classmethod - def column_family_name(cls): + def column_family_name(cls, include_keyspace=True): """ Returns the column family name if it's been defined otherwise, it creates it from the module and class name @@ -38,7 +36,9 @@ def column_family_name(cls): cf_name = cf_name.replace('.', '_') #trim to less than 48 characters or cassandra will complain cf_name = cf_name[-48:] - return cf_name.lower() + cf_name = cf_name.lower() + if not include_keyspace: 
return cf_name + return '{}.{}'.format(cls.keyspace, cf_name) @property def pk(self): diff --git a/cqlengine/query.py b/cqlengine/query.py index a62b172d18..bf5c1464d8 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -4,6 +4,7 @@ from time import time from cqlengine.connection import get_connection +from cqlengine.connection import connection_manager from cqlengine.exceptions import CQLEngineException #CQL 3 reference: @@ -220,7 +221,7 @@ def _select_query(self): def __iter__(self): #TODO: cache results if self._cursor is None: - conn = get_connection(self.model.keyspace) + conn = get_connection() self._cursor = conn.cursor() self._cursor.execute(self._select_query(), self._where_values()) self._rowcount = self._cursor.rowcount @@ -350,10 +351,9 @@ def count(self): qs += ['WHERE {}'.format(self._where_clause())] qs = ' '.join(qs) - con = get_connection(self.model.keyspace) - cur = con.cursor() - cur.execute(qs, self._where_values()) - return cur.fetchone()[0] + with connection_manager() as con: + cur = con.execute(qs, self._where_values()) + return cur.fetchone()[0] def limit(self, v): """ @@ -430,11 +430,10 @@ def save(self, instance): qs += ["({})".format(', '.join([':'+f for f in field_names]))] qs = ' '.join(qs) - conn = get_connection(self.model.keyspace) - cur = conn.cursor() - cur.execute(qs, field_values) + with connection_manager() as con: + con.execute(qs, field_values) - #TODO: delete deleted / nulled columns + #delete deleted / nulled columns deleted = [k for k,v in instance._values.items() if v.deleted] if deleted: del_fields = [self.model._columns[f] for f in deleted] @@ -448,10 +447,10 @@ def save(self, instance): qs = ' '.join(qs) pk_dict = dict([(v.db_field_name, getattr(instance, k)) for k,v in pks.items()]) - cur.execute(qs, pk_dict) - - + with connection_manager() as con: + con.execute(qs, pk_dict) + def create(self, **kwargs): return self.model(**kwargs).save() @@ -466,9 +465,8 @@ def delete(self, columns=[]): qs += ['WHERE 
{}'.format(self._where_clause())] qs = ' '.join(qs) - con = get_connection(self.model.keyspace) - cur = con.cursor() - cur.execute(qs, self._where_values()) + with connection_manager() as con: + con.execute(qs, self._where_values()) def delete_instance(self, instance): @@ -478,8 +476,7 @@ def delete_instance(self, instance): qs += ['WHERE {0}=:{0}'.format(pk_name)] qs = ' '.join(qs) - conn = get_connection(self.model.keyspace) - cur = conn.cursor() - cur.execute(qs, {pk_name:instance.pk}) + with connection_manager() as con: + con.execute(qs, {pk_name:instance.pk}) diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index ac1f31d8f1..d94ea69b04 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -1,7 +1,14 @@ from unittest import TestCase +from cqlengine import connection class BaseCassEngTestCase(TestCase): + @classmethod + def setUpClass(cls): + super(BaseCassEngTestCase, cls).setUpClass() + if not connection._hosts: + connection.setup(['localhost']) + def assertHasAttr(self, obj, attr): self.assertTrue(hasattr(obj, attr), "{} doesn't have attribute: {}".format(obj, attr)) diff --git a/cqlengine/tests/management/__init__.py b/cqlengine/tests/management/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py new file mode 100644 index 0000000000..e69de29bb2 From 6816148ef5e1c9d3a173bfdc52043f94f338e827 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 25 Nov 2012 10:47:32 -0800 Subject: [PATCH 0041/3726] removing and get_connection and editing files depending on it --- cqlengine/connection.py | 22 ---------------------- cqlengine/management.py | 1 - cqlengine/query.py | 7 +++---- 3 files changed, 3 insertions(+), 27 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 6cf5c1ff4a..84d6d1eb5b 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -59,28 +59,6 @@ def 
setup(hosts, username=None, password=None): _set_conn(host) -#TODO: look into the cql connection pool class -_old_conn = {} -def get_connection(keyspace=None, create_missing_keyspace=True): - con = _old_conn.get(keyspace) - if con is None: - con = cql.connect('127.0.0.1', 9160) - con.set_cql_version('3.0.0') - - if keyspace: - try: - con.set_initial_keyspace(keyspace) - except cql.ProgrammingError, e: - if create_missing_keyspace: - from cqlengine.management import create_keyspace - create_keyspace(keyspace) - else: - raise CQLConnectionError('"{}" is not an existing keyspace'.format(keyspace)) - - _old_conn[keyspace] = con - - return con - class connection_manager(object): """ Connection failure tolerant connection manager. Written to be used in a 'with' block for connection pooling diff --git a/cqlengine/management.py b/cqlengine/management.py index ac967059b1..97c565c12b 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -1,4 +1,3 @@ -from cqlengine.connection import get_connection from cqlengine.connection import connection_manager def create_keyspace(name): diff --git a/cqlengine/query.py b/cqlengine/query.py index bf5c1464d8..921e8d19af 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -3,7 +3,6 @@ from hashlib import md5 from time import time -from cqlengine.connection import get_connection from cqlengine.connection import connection_manager from cqlengine.exceptions import CQLEngineException @@ -221,9 +220,9 @@ def _select_query(self): def __iter__(self): #TODO: cache results if self._cursor is None: - conn = get_connection() - self._cursor = conn.cursor() - self._cursor.execute(self._select_query(), self._where_values()) + #TODO: the query and caching should happen in the same function + with connection_manager() as con: + self._cursor = con.execute(self._select_query(), self._where_values()) self._rowcount = self._cursor.rowcount return self From 3038afbfc17a02d3e105222689f6ddc2bcacc9e6 Mon Sep 17 00:00:00 2001 From: Blake 
Eggleston Date: Sun, 25 Nov 2012 11:00:42 -0800 Subject: [PATCH 0042/3726] adding BSD3 license and authors file --- AUTHORS | 8 ++++++++ LICENSE | 9 +++++++++ 2 files changed, 17 insertions(+) create mode 100644 AUTHORS create mode 100644 LICENSE diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 0000000000..3a69691a3e --- /dev/null +++ b/AUTHORS @@ -0,0 +1,8 @@ +PRIMARY AUTHORS + +Blake Eggleston + +CONTRIBUTORS + +Eric Scrivner + diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000..3833bbf351 --- /dev/null +++ b/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) 2012, Blake Eggleston and AUTHORS +All rights reserved. + +* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +* Neither the name of the cqlengine nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. From f0686a50af396454ab06bf4b9a86091119111282 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 25 Nov 2012 11:49:33 -0800 Subject: [PATCH 0043/3726] updating readme --- README.md | 77 +++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 66 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index cc2d2fc976..7405b89433 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,73 @@ -cassandraengine +cqlengine =============== -Cassandra ORM for Python in the style of the Django orm and mongoengine +cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine -In it's current state you can define column families, create and delete column families -based on your model definiteions, save models and retrieve models by their primary keys. +[Documentation](https://github.com/bdeggleston/cqlengine/wiki/cqlengine-documentation) -That's about it. Also, the CQL stuff is very basic at this point. +[Report a Bug](https://github.com/bdeggleston/cqlengine/issues) -##TODO -* column ttl? -* ForeignKey/DBRef fields? -* query functionality -* Match column names to mongoengine field names? 
-* nice column and model class __repr__ +[Users Mailing List](https://groups.google.com/forum/?fromgroups#!forum/cqlengine-users) +[Dev Mailing List](https://groups.google.com/forum/?fromgroups#!forum/cqlengine-dev) +## Installation +TODO + +## Getting Started + +```python +#first, define a model +>>> from cqlengine import columns +>>> from cqlengine.models import Model + +>>> class ExampleModel(Model): +>>> example_id = columns.UUID(primary_key=True) +>>> example_type = columns.Integer(index=True) +>>> created_at = columns.DateTime() +>>> description = columns.Text(required=False) + +#next, setup the connection to your cassandra server(s)... +>>> from cqlengine import connection +>>> connection.setup(['127.0.0.1:9160']) + +#...and create your CQL table +>>> from cqlengine.management import create_column_family +>>> create_column_family(ExampleModel) + +#now we can create some rows: +>>> em1 = ExampleModel.objects.create(example_type=0, description="example1") +>>> em2 = ExampleModel.objects.create(example_type=0, description="example2") +>>> em3 = ExampleModel.objects.create(example_type=0, description="example3") +>>> em4 = ExampleModel.objects.create(example_type=0, description="example4") +>>> em5 = ExampleModel.objects.create(example_type=1, description="example5") +>>> em6 = ExampleModel.objects.create(example_type=1, description="example6") +>>> em7 = ExampleModel.objects.create(example_type=1, description="example7") +>>> em8 = ExampleModel.objects.create(example_type=1, description="example8") +# Note: the UUID and DateTime columns will create uuid4 and datetime.now +# values automatically if we don't specify them when creating new rows + +#and now we can run some queries against our table +>>> ExampleModel.objects.count() +8 +>>> q = ExampleModel.objects(example_type=1) +>>> q.count() +4 +>>> for instance in q: +>>> print q.description +example5 +example6 +example7 +example8 + +#here we are applying additional filtering to an existing query +#query 
objects are immutable, so calling filter returns a new +#query object +>>> q2 = q.filter(example_id=em5.example_id) + +>>> q2.count() +1 +>>> for instance in q2: +>>> print q.description +example5 +``` From 1038b6b13251b8784ddc302c524e5d6ddc0cf5ff Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 25 Nov 2012 11:54:49 -0800 Subject: [PATCH 0044/3726] added automatic keyspace generation to create_column_family added checks for existing keyspaces in create_keyspace and delete_keyspace --- cqlengine/management.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 97c565c12b..de0e93a45c 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -2,19 +2,25 @@ def create_keyspace(name): with connection_manager() as con: - con.execute("""CREATE KEYSPACE {} - WITH strategy_class = 'SimpleStrategy' - AND strategy_options:replication_factor=1;""".format(name)) + if name not in [k.name for k in con.con.client.describe_keyspaces()]: + con.execute("""CREATE KEYSPACE {} + WITH strategy_class = 'SimpleStrategy' + AND strategy_options:replication_factor=1;""".format(name)) def delete_keyspace(name): with connection_manager() as con: - con.execute("DROP KEYSPACE {}".format(name)) + if name in [k.name for k in con.con.client.describe_keyspaces()]: + con.execute("DROP KEYSPACE {}".format(name)) -def create_column_family(model): +def create_column_family(model, create_missing_keyspace=True): #construct query string cf_name = model.column_family_name() raw_cf_name = model.column_family_name(include_keyspace=False) + #create missing keyspace + if create_missing_keyspace: + create_keyspace(model.keyspace) + with connection_manager() as con: #check for an existing column family ks_info = con.con.client.describe_keyspace(model.keyspace) From 382a63875b83ec7a2ab267559e60caec42abfe55 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 25 Nov 2012 12:16:33 -0800 Subject: [PATCH 
0045/3726] changing docs url --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7405b89433..ba909d4c8c 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ cqlengine cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine -[Documentation](https://github.com/bdeggleston/cqlengine/wiki/cqlengine-documentation) +[Documentation](https://github.com/bdeggleston/cqlengine/wiki/Documentation) [Report a Bug](https://github.com/bdeggleston/cqlengine/issues) From 11e217203c3d6523a03fbdd7fdff8d04494038e6 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 25 Nov 2012 14:44:05 -0800 Subject: [PATCH 0046/3726] refactoring BaseColumn name --- cqlengine/columns.py | 28 ++++++++++++++-------------- cqlengine/models.py | 1 + 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index b62666001f..98abb96511 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -35,7 +35,7 @@ def get_property(self): else: return property(_get, _set) -class BaseColumn(object): +class Column(object): #the cassandra type this column maps to db_type = None @@ -64,8 +64,8 @@ def __init__(self, primary_key=False, index=False, db_field=None, default=None, self.value = None #keep track of instantiation order - self.position = BaseColumn.instance_counter - BaseColumn.instance_counter += 1 + self.position = Column.instance_counter + Column.instance_counter += 1 def validate(self, value): """ @@ -136,16 +136,16 @@ def db_index_name(self): """ Returns the name of the cql index """ return 'index_{}'.format(self.db_field_name) -class Bytes(BaseColumn): +class Bytes(Column): db_type = 'blob' -class Ascii(BaseColumn): +class Ascii(Column): db_type = 'ascii' -class Text(BaseColumn): +class Text(Column): db_type = 'text' -class Integer(BaseColumn): +class Integer(Column): db_type = 'int' def validate(self, value): @@ -161,13 +161,13 @@ def to_python(self, 
value): def to_database(self, value): return self.validate(value) -class DateTime(BaseColumn): +class DateTime(Column): db_type = 'timestamp' def __init__(self, **kwargs): super(DateTime, self).__init__(**kwargs) raise NotImplementedError -class UUID(BaseColumn): +class UUID(Column): """ Type 1 or 4 UUID """ @@ -186,7 +186,7 @@ def validate(self, value): raise ValidationError("{} is not a valid uuid".format(value)) return _UUID(val) -class Boolean(BaseColumn): +class Boolean(Column): db_type = 'boolean' def to_python(self, value): @@ -195,7 +195,7 @@ def to_python(self, value): def to_database(self, value): return bool(value) -class Float(BaseColumn): +class Float(Column): db_type = 'double' def validate(self, value): @@ -210,20 +210,20 @@ def to_python(self, value): def to_database(self, value): return self.validate(value) -class Decimal(BaseColumn): +class Decimal(Column): db_type = 'decimal' #TODO: decimal field def __init__(self, **kwargs): super(DateTime, self).__init__(**kwargs) raise NotImplementedError -class Counter(BaseColumn): +class Counter(Column): #TODO: counter field def __init__(self, **kwargs): super(Counter, self).__init__(**kwargs) raise NotImplementedError -class ForeignKey(BaseColumn): +class ForeignKey(Column): #TODO: Foreign key field def __init__(self, **kwargs): super(ForeignKey, self).__init__(**kwargs) diff --git a/cqlengine/models.py b/cqlengine/models.py index b17df09fb3..7b5dc707de 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -103,6 +103,7 @@ def _transform_column(col_name, col_obj): k,v = 'id', columns.UUID(primary_key=True) column_definitions = [(k,v)] + column_definitions + #TODO: check that the defined columns don't conflict with any of the Model API's existing attributes/methods #transform column definitions for k,v in column_definitions: if pk_name is None and v.primary_key: From bf62e234ecd86e040eb74827d8c9d3cc5bd71f53 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 25 Nov 2012 15:20:02 -0800 Subject: 
[PATCH 0047/3726] added double/single precision option to float field fixing some old references to BaseColumn --- cqlengine/columns.py | 4 ++++ cqlengine/models.py | 2 +- cqlengine/tests/columns/test_validation.py | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 98abb96511..df7a3a3480 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -198,6 +198,10 @@ def to_database(self, value): class Float(Column): db_type = 'double' + def __init__(self, double_precision=True, **kwargs): + self.db_type = 'double' if double_precision else 'float' + super(Float, self).__init__(**kwargs) + def validate(self, value): try: return float(value) diff --git a/cqlengine/models.py b/cqlengine/models.py index 7b5dc707de..d977742fbc 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -95,7 +95,7 @@ def _transform_column(col_name, col_obj): else: attrs[col_name] = property(_get, _set, _del) - column_definitions = [(k,v) for k,v in attrs.items() if isinstance(v, columns.BaseColumn)] + column_definitions = [(k,v) for k,v in attrs.items() if isinstance(v, columns.Column)] column_definitions = sorted(column_definitions, lambda x,y: cmp(x[1].position, y[1].position)) #prepend primary key if none has been defined diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index f916fb0ebe..4043dfe89a 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -2,7 +2,7 @@ from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.columns import BaseColumn +from cqlengine.columns import Column from cqlengine.columns import Bytes from cqlengine.columns import Ascii from cqlengine.columns import Text From f62cc9dab6b7f8a3a5c80c324216805881094eea Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 25 Nov 2012 18:28:13 -0800 Subject: [PATCH 0048/3726] adding scary alpha warning --- README.md | 2 ++ 1 file 
changed, 2 insertions(+) diff --git a/README.md b/README.md index ba909d4c8c..a218535d91 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,8 @@ cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and m [Dev Mailing List](https://groups.google.com/forum/?fromgroups#!forum/cqlengine-dev) +**NOTE: cqlengine is in alpha and under development, some features may change (hopefully with notice). Make sure to check the changelog and test your app before upgrading** + ## Installation TODO From bd940eff49e1877ffbb4c9487a67d451db2c8349 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 25 Nov 2012 18:28:35 -0800 Subject: [PATCH 0049/3726] adding datetime and decimal columns and tests --- cqlengine/columns.py | 17 +++++--- cqlengine/models.py | 4 +- cqlengine/query.py | 7 +-- cqlengine/tests/columns/test_validation.py | 51 ++++++++++++++++++++++ 4 files changed, 70 insertions(+), 9 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index df7a3a3480..617b0d4b12 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -1,4 +1,5 @@ #column field types +from datetime import datetime import re from uuid import uuid1, uuid4 @@ -165,7 +166,17 @@ class DateTime(Column): db_type = 'timestamp' def __init__(self, **kwargs): super(DateTime, self).__init__(**kwargs) - raise NotImplementedError + + def to_python(self, value): + if isinstance(value, datetime): + return value + return datetime.fromtimestamp(value) + + def to_database(self, value): + value = super(DateTime, self).to_database(value) + if not isinstance(value, datetime): + raise ValidationError("'{}' is not a datetime object".format(value)) + return value.strftime('%Y-%m-%d %H:%M:%S') class UUID(Column): """ @@ -216,10 +227,6 @@ def to_database(self, value): class Decimal(Column): db_type = 'decimal' - #TODO: decimal field - def __init__(self, **kwargs): - super(DateTime, self).__init__(**kwargs) - raise NotImplementedError class Counter(Column): #TODO: counter field diff 
--git a/cqlengine/models.py b/cqlengine/models.py index d977742fbc..c629d2041f 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -21,7 +21,9 @@ class BaseModel(object): def __init__(self, **values): self._values = {} for name, column in self._columns.items(): - value_mngr = column.value_manager(self, column, values.get(name, None)) + value = values.get(name, None) + if value is not None: value = column.to_python(value) + value_mngr = column.value_manager(self, column, value) self._values[name] = value_mngr @classmethod diff --git a/cqlengine/query.py b/cqlengine/query.py index 921e8d19af..bb7a5e519f 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -117,9 +117,8 @@ class LessThanOrEqualOperator(QueryOperator): cql_symbol = '<=' class QuerySet(object): - #TODO: delete empty columns on save - #TODO: support specifying columns to exclude or select only #TODO: cache results in this instance, but don't copy them on deepcopy + #TODO: support multiple iterators def __init__(self, model): super(QuerySet, self).__init__() @@ -224,7 +223,9 @@ def __iter__(self): with connection_manager() as con: self._cursor = con.execute(self._select_query(), self._where_values()) self._rowcount = self._cursor.rowcount - return self + return self + else: + raise QueryException("QuerySet only supports a single iterator at a time, though this will be fixed shortly") def __getitem__(self, s): diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 4043dfe89a..3f70ab12cd 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -1,4 +1,6 @@ #tests the behavior of the column classes +from datetime import datetime +from decimal import Decimal as D from cqlengine.tests.base import BaseCassEngTestCase @@ -13,4 +15,53 @@ from cqlengine.columns import Float from cqlengine.columns import Decimal +from cqlengine.management import create_column_family, delete_column_family +from 
cqlengine.models import Model + +class TestDatetime(BaseCassEngTestCase): + class DatetimeTest(Model): + test_id = Integer(primary_key=True) + created_at = DateTime() + + @classmethod + def setUpClass(cls): + super(TestDatetime, cls).setUpClass() + create_column_family(cls.DatetimeTest) + + @classmethod + def tearDownClass(cls): + super(TestDatetime, cls).tearDownClass() + delete_column_family(cls.DatetimeTest) + + def test_datetime_io(self): + now = datetime.now() + dt = self.DatetimeTest.objects.create(test_id=0, created_at=now) + dt2 = self.DatetimeTest.objects(test_id=0).first() + assert dt2.created_at.timetuple()[:6] == now.timetuple()[:6] + +class TestDecimal(BaseCassEngTestCase): + class DecimalTest(Model): + test_id = Integer(primary_key=True) + dec_val = Decimal() + + @classmethod + def setUpClass(cls): + super(TestDecimal, cls).setUpClass() + create_column_family(cls.DecimalTest) + + @classmethod + def tearDownClass(cls): + super(TestDecimal, cls).tearDownClass() + delete_column_family(cls.DecimalTest) + + def test_datetime_io(self): + dt = self.DecimalTest.objects.create(test_id=0, dec_val=D('0.00')) + dt2 = self.DecimalTest.objects(test_id=0).first() + assert dt2.dec_val == dt.dec_val + + dt = self.DecimalTest.objects.create(test_id=0, dec_val=5) + dt2 = self.DecimalTest.objects(test_id=0).first() + assert dt2.dec_val == D('5') + + From 168c1136023e81df93f7c1ea69e25ebfe24e0a61 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 25 Nov 2012 18:56:38 -0800 Subject: [PATCH 0050/3726] adding setuptools stuff --- MANIFEST.in | 5 +++++ cqlengine/__init__.py | 2 ++ setup.py | 43 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 50 insertions(+) create mode 100644 MANIFEST.in create mode 100644 setup.py diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000000..d7e1cb6199 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,5 @@ +include AUTHORS +include LICENSE +include README.md +recursive-include cqlengine * + diff --git 
a/cqlengine/__init__.py b/cqlengine/__init__.py index 1af8944a10..0a826654f8 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,3 +1,5 @@ from cqlengine.columns import * from cqlengine.models import Model +__version__ = '0.0.1-ALPHA' + diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000..afd921b17c --- /dev/null +++ b/setup.py @@ -0,0 +1,43 @@ +from setuptools import setup, find_packages + +version = '0.0.1-ALPHA' + +long_desc = """ +cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine + +[Documentation](https://github.com/bdeggleston/cqlengine/wiki/Documentation) + +[Report a Bug](https://github.com/bdeggleston/cqlengine/issues) + +[Users Mailing List](https://groups.google.com/forum/?fromgroups#!forum/cqlengine-users) + +[Dev Mailing List](https://groups.google.com/forum/?fromgroups#!forum/cqlengine-dev) + +**NOTE: cqlengine is in alpha and under development, some features may change (hopefully with notice). Make sure to check the changelog and test your app before upgrading** +""" + +setup( + name='cqlengine', + version=version, + description='Cassandra CQL ORM for Python in the style of the Django orm and mongoengine', + long_description=long_desc, + classifiers = [ + "Development Status :: 3 - Alpha", + "Environment :: Web Environment", + "Environment :: Plugins", + "License :: OSI Approved :: BSD License", + "Operating System :: OS Independent", + "Programming Language :: Python :: 2.6", + "Programming Language :: Python :: 2.7", + "Topic :: Internet :: WWW/HTTP", + "Topic :: Software Development :: Libraries :: Python Modules", + ], + keywords='cassandra,cql,orm', + author='Blake Eggleston', + author_email='bdeggleston@gmail.com', + url='https://github.com/bdeggleston/cqlengine', + license='BSD', + packages=find_packages(), + include_package_data=True, +) + From f6c51ae55e23a3fdaeb0d9fc9b0bca61acaaf5e2 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 25 Nov 2012 19:04:28 
-0800 Subject: [PATCH 0051/3726] adding cql dependency --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index afd921b17c..8fba346530 100644 --- a/setup.py +++ b/setup.py @@ -33,6 +33,7 @@ "Topic :: Software Development :: Libraries :: Python Modules", ], keywords='cassandra,cql,orm', + install_requires = ['cql'], author='Blake Eggleston', author_email='bdeggleston@gmail.com', url='https://github.com/bdeggleston/cqlengine', From 1aa7f4ea11a0a96da69ce5b8a7dffad7386ad794 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 25 Nov 2012 21:17:14 -0800 Subject: [PATCH 0052/3726] adding github dependency link --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 8fba346530..fbe3451c70 100644 --- a/setup.py +++ b/setup.py @@ -20,6 +20,7 @@ name='cqlengine', version=version, description='Cassandra CQL ORM for Python in the style of the Django orm and mongoengine', + dependency_links = ['https://github.com/bdeggleston/cqlengine/archive/0.0.1-ALPHA.tar.gz#egg=cqlengine-0.0.1-ALPHA.tar.gz'], long_description=long_desc, classifiers = [ "Development Status :: 3 - Alpha", From 823ebbf8c7346b6d44908414d6e6318fa6dd7e55 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 25 Nov 2012 21:49:24 -0800 Subject: [PATCH 0053/3726] removing old reference to con_pool from connection --- cqlengine/connection.py | 1 - setup.py | 4 ++++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 84d6d1eb5b..65147f6ac9 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -92,7 +92,6 @@ def execute(self, query, params={}): except TTransportException: #TODO: check for other errors raised in the event of a connection / server problem #move to the next connection and set the connection pool - self.con_pool.return_connection(self.con) self.con = None _host_idx += 1 _host_idx %= len(_hosts) diff --git a/setup.py b/setup.py index 
fbe3451c70..570fe6ac31 100644 --- a/setup.py +++ b/setup.py @@ -1,5 +1,9 @@ from setuptools import setup, find_packages +#next time: +#python setup.py register +#python setup.py sdist upload + version = '0.0.1-ALPHA' long_desc = """ From 35b83d448577c0b90937cd1dcda994e2b3680de2 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 25 Nov 2012 21:56:25 -0800 Subject: [PATCH 0054/3726] updating version --- cqlengine/__init__.py | 2 +- setup.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 0a826654f8..6822d1af02 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,5 +1,5 @@ from cqlengine.columns import * from cqlengine.models import Model -__version__ = '0.0.1-ALPHA' +__version__ = '0.0.2-ALPHA' diff --git a/setup.py b/setup.py index 570fe6ac31..e422ca0be6 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.0.1-ALPHA' +version = '0.0.2-ALPHA' long_desc = """ cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine @@ -24,7 +24,7 @@ name='cqlengine', version=version, description='Cassandra CQL ORM for Python in the style of the Django orm and mongoengine', - dependency_links = ['https://github.com/bdeggleston/cqlengine/archive/0.0.1-ALPHA.tar.gz#egg=cqlengine-0.0.1-ALPHA.tar.gz'], + dependency_links = ['https://github.com/bdeggleston/cqlengine/archive/0.0.2-ALPHA.tar.gz#egg=cqlengine-0.0.2-ALPHA'], long_description=long_desc, classifiers = [ "Development Status :: 3 - Alpha", From 69546acca86118e848bd72b7bddc74cd67595326 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 25 Nov 2012 22:02:01 -0800 Subject: [PATCH 0055/3726] adding pip instructions --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index a218535d91..72780d257d 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,9 @@ cqlengine is a Cassandra CQL 
ORM for Python in the style of the Django orm and m **NOTE: cqlengine is in alpha and under development, some features may change (hopefully with notice). Make sure to check the changelog and test your app before upgrading** ## Installation -TODO +``` +pip install cqlengine +``` ## Getting Started From d844c0ddeb669108195ce1b6a0a119a176002d4a Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 26 Nov 2012 21:54:53 -0800 Subject: [PATCH 0056/3726] adding queryset result caching and array indexing / slicing of querysets --- cqlengine/query.py | 117 +++++++++++++++---------- cqlengine/tests/query/test_queryset.py | 49 ++++++++++- 2 files changed, 116 insertions(+), 50 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index bb7a5e519f..74e8810cc1 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -117,8 +117,6 @@ class LessThanOrEqualOperator(QueryOperator): cql_symbol = '<=' class QuerySet(object): - #TODO: cache results in this instance, but don't copy them on deepcopy - #TODO: support multiple iterators def __init__(self, model): super(QuerySet, self).__init__() @@ -140,9 +138,9 @@ def __init__(self, model): self._only_fields = [] #results cache - self._result_cache = None - self._cursor = None + self._result_cache = None + self._result_idx = None def __unicode__(self): return self._select_query() @@ -156,7 +154,7 @@ def __call__(self, **kwargs): def __deepcopy__(self, memo): clone = self.__class__(self.model) for k,v in self.__dict__.items(): - if k in ['_result_cache']: + if k in ['_cursor', '_result_cache', '_result_idx']: clone.__dict__[k] = None else: clone.__dict__[k] = copy.deepcopy(v, memo) @@ -216,29 +214,65 @@ def _select_query(self): #----Reads------ - def __iter__(self): - #TODO: cache results - if self._cursor is None: - #TODO: the query and caching should happen in the same function - with connection_manager() as con: - self._cursor = con.execute(self._select_query(), self._where_values()) - self._rowcount = 
self._cursor.rowcount - return self + def _execute_query(self): + with connection_manager() as con: + self._cursor = con.execute(self._select_query(), self._where_values()) + self._result_cache = [None]*self._cursor.rowcount + + def _fill_result_cache_to_idx(self, idx): + if self._result_cache is None: + self._execute_query() + if self._result_idx is None: + self._result_idx = -1 + + names = [i[0] for i in self._cursor.description] + qty = idx - self._result_idx + if qty < 1: + return else: - raise QueryException("QuerySet only supports a single iterator at a time, though this will be fixed shortly") + for values in self._cursor.fetchmany(qty): + value_dict = dict(zip(names, values)) + self._result_idx += 1 + self._result_cache[self._result_idx] = self._construct_instance(value_dict) + + def __iter__(self): + if self._result_cache is None: + self._execute_query() + + for idx in range(len(self._result_cache)): + instance = self._result_cache[idx] + if instance is None: + self._fill_result_cache_to_idx(idx) + yield self._result_cache[idx] def __getitem__(self, s): + if self._result_cache is None: + self._execute_query() + + num_results = len(self._result_cache) if isinstance(s, slice): - #return a new query with limit defined - #start and step are not supported - if s.start: raise QueryException('CQL does not support START') - if s.step: raise QueryException('step is not supported') - return self.limit(s.stop) + #calculate the amount of results that need to be loaded + end = num_results if s.step is None else s.step + if end < 0: + end += num_results + else: + end -= 1 + self._fill_result_cache_to_idx(end) + return self._result_cache[s.start:s.stop:s.step] else: #return the object at this index s = long(s) - raise NotImplementedError + + #handle negative indexing + if s < 0: s += num_results + + if s >= num_results: + raise IndexError + else: + self._fill_result_cache_to_idx(s) + return self._result_cache[s] + def _construct_instance(self, values): #translate column 
names to model names @@ -251,25 +285,11 @@ def _construct_instance(self, values): field_dict[key] = val return self.model(**field_dict) - def _get_next(self): - """ Gets the next cursor result """ - cur = self._cursor - values = cur.fetchone() - if values is None: return - names = [i[0] for i in cur.description] - value_dict = dict(zip(names, values)) - return self._construct_instance(value_dict) - - def next(self): - instance = self._get_next() - if instance is None: - #TODO: this is inefficient, we should be caching the results - self._cursor = None - raise StopIteration - return instance - def first(self): - return iter(self)._get_next() + try: + return iter(self).next() + except StopIteration: + return None def all(self): clone = copy.deepcopy(self) @@ -345,15 +365,18 @@ def order_by(self, colname): def count(self): """ Returns the number of rows matched by this query """ #TODO: check for previous query execution and return row count if it exists - qs = ['SELECT COUNT(*)'] - qs += ['FROM {}'.format(self.column_family_name)] - if self._where: - qs += ['WHERE {}'.format(self._where_clause())] - qs = ' '.join(qs) - - with connection_manager() as con: - cur = con.execute(qs, self._where_values()) - return cur.fetchone()[0] + if self._result_cache is None: + qs = ['SELECT COUNT(*)'] + qs += ['FROM {}'.format(self.column_family_name)] + if self._where: + qs += ['WHERE {}'.format(self._where_clause())] + qs = ' '.join(qs) + + with connection_manager() as con: + cur = con.execute(qs, self._where_values()) + return cur.fetchone()[0] + else: + return len(self._result_cache) def limit(self, v): """ diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index b3c1449ad8..b90888a002 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -210,6 +210,19 @@ def test_multiple_iterations_work_properly(self): compare_set.remove(val) assert len(compare_set) == 0 + def 
test_multiple_iterators_are_isolated(self): + """ + tests that the use of one iterator does not affect the behavior of another + """ + q = TestModel.objects(test_id=0).order_by('attempt_id') + expected_order = [0,1,2,3] + iter1 = iter(q) + iter2 = iter(q) + for attempt_id in expected_order: + assert iter1.next().attempt_id == attempt_id + assert iter2.next().attempt_id == attempt_id + + class TestQuerySetOrdering(BaseQuerySetUsage): def test_order_by_success_case(self): @@ -238,7 +251,37 @@ def test_ordering_on_indexed_columns_fails(self): q = IndexedTestModel.objects(test_id=0).order_by('attempt_id') class TestQuerySetSlicing(BaseQuerySetUsage): - pass + + def test_out_of_range_index_raises_error(self): + q = TestModel.objects(test_id=0).order_by('attempt_id') + with self.assertRaises(IndexError): + q[10] + + def test_array_indexing_works_properly(self): + q = TestModel.objects(test_id=0).order_by('attempt_id') + expected_order = [0,1,2,3] + for i in range(len(q)): + assert q[i].attempt_id == expected_order[i] + + def test_negative_indexing_works_properly(self): + q = TestModel.objects(test_id=0).order_by('attempt_id') + expected_order = [0,1,2,3] + assert q[-1].attempt_id == expected_order[-1] + assert q[-2].attempt_id == expected_order[-2] + + def test_slicing_works_properly(self): + q = TestModel.objects(test_id=0).order_by('attempt_id') + expected_order = [0,1,2,3] + for model, expect in zip(q[1:3], expected_order[1:3]): + assert model.attempt_id == expect + + def test_negative_slicing(self): + q = TestModel.objects(test_id=0).order_by('attempt_id') + expected_order = [0,1,2,3] + for model, expect in zip(q[-3:], expected_order[-3:]): + assert model.attempt_id == expect + for model, expect in zip(q[:-1], expected_order[:-1]): + assert model.attempt_id == expect class TestQuerySetValidation(BaseQuerySetUsage): @@ -248,7 +291,7 @@ def test_primary_key_or_index_must_be_specified(self): """ with self.assertRaises(query.QueryException): q = 
TestModel.objects(test_result=25) - iter(q) + [i for i in q] def test_primary_key_or_index_must_have_equal_relation_filter(self): """ @@ -256,7 +299,7 @@ def test_primary_key_or_index_must_have_equal_relation_filter(self): """ with self.assertRaises(query.QueryException): q = TestModel.objects(test_id__gt=0) - iter(q) + [i for i in q] def test_indexed_field_can_be_queried(self): From 74181e2f31545c47c0feb3bcc23bf04ca6e85501 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 26 Nov 2012 22:11:29 -0800 Subject: [PATCH 0057/3726] making the autogenerated table names a bit more readable --- cqlengine/models.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index c629d2041f..6afa240008 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -1,4 +1,5 @@ from collections import OrderedDict +import re from cqlengine import columns from cqlengine.exceptions import ModelException @@ -34,8 +35,16 @@ def column_family_name(cls, include_keyspace=True): """ if cls.db_name: return cls.db_name.lower() - cf_name = cls.__module__ + '.' 
+ cls.__name__ - cf_name = cf_name.replace('.', '_') + + camelcase = re.compile(r'([a-z])([A-Z])') + ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s) + + cf_name = '' + module = cls.__module__.split('.') + if module: + cf_name = ccase(module[-1]) + '_' + + cf_name += ccase(cls.__name__) #trim to less than 48 characters or cassandra will complain cf_name = cf_name[-48:] cf_name = cf_name.lower() From 70bea617f41b86a89435e43a703aaba1f809f01b Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 26 Nov 2012 22:13:21 -0800 Subject: [PATCH 0058/3726] updating version --- cqlengine/__init__.py | 2 +- setup.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 6822d1af02..90414d015b 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,5 +1,5 @@ from cqlengine.columns import * from cqlengine.models import Model -__version__ = '0.0.2-ALPHA' +__version__ = '0.0.3-ALPHA' diff --git a/setup.py b/setup.py index e422ca0be6..8b7adfe59c 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.0.2-ALPHA' +version = '0.0.3-ALPHA' long_desc = """ cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine @@ -24,7 +24,7 @@ name='cqlengine', version=version, description='Cassandra CQL ORM for Python in the style of the Django orm and mongoengine', - dependency_links = ['https://github.com/bdeggleston/cqlengine/archive/0.0.2-ALPHA.tar.gz#egg=cqlengine-0.0.2-ALPHA'], + dependency_links = ['https://github.com/bdeggleston/cqlengine/archive/{0}.tar.gz#egg=cqlengine-{0}'.format(version)], long_description=long_desc, classifiers = [ "Development Status :: 3 - Alpha", From 6c006578302a91a33d6a26fb6ad2aa9ceac42b90 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 28 Nov 2012 21:58:56 -0800 Subject: [PATCH 0059/3726] added create shortcut classmethod on 
BaseModel --- README.md | 16 ++++++++-------- cqlengine/models.py | 4 ++++ cqlengine/tests/model/test_model_io.py | 6 +++--- 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 72780d257d..5656f0e2ce 100644 --- a/README.md +++ b/README.md @@ -40,14 +40,14 @@ pip install cqlengine >>> create_column_family(ExampleModel) #now we can create some rows: ->>> em1 = ExampleModel.objects.create(example_type=0, description="example1") ->>> em2 = ExampleModel.objects.create(example_type=0, description="example2") ->>> em3 = ExampleModel.objects.create(example_type=0, description="example3") ->>> em4 = ExampleModel.objects.create(example_type=0, description="example4") ->>> em5 = ExampleModel.objects.create(example_type=1, description="example5") ->>> em6 = ExampleModel.objects.create(example_type=1, description="example6") ->>> em7 = ExampleModel.objects.create(example_type=1, description="example7") ->>> em8 = ExampleModel.objects.create(example_type=1, description="example8") +>>> em1 = ExampleModel.create(example_type=0, description="example1") +>>> em2 = ExampleModel.create(example_type=0, description="example2") +>>> em3 = ExampleModel.create(example_type=0, description="example3") +>>> em4 = ExampleModel.create(example_type=0, description="example4") +>>> em5 = ExampleModel.create(example_type=1, description="example5") +>>> em6 = ExampleModel.create(example_type=1, description="example6") +>>> em7 = ExampleModel.create(example_type=1, description="example7") +>>> em8 = ExampleModel.create(example_type=1, description="example8") # Note: the UUID and DateTime columns will create uuid4 and datetime.now # values automatically if we don't specify them when creating new rows diff --git a/cqlengine/models.py b/cqlengine/models.py index 6afa240008..11f7e3f64a 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -69,6 +69,10 @@ def as_dict(self): values[name] = col.to_database(getattr(self, name, None)) return values + 
@classmethod + def create(cls, **kwargs): + return cls.objects.create(**kwargs) + def save(self): is_new = self.pk is None self.validate() diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index f408e46ce9..85100eeb13 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -27,7 +27,7 @@ def test_model_save_and_load(self): """ Tests that models can be saved and retrieved """ - tm = TestModel.objects.create(count=8, text='123456789') + tm = TestModel.create(count=8, text='123456789') tm2 = TestModel.objects(id=tm.pk).first() for cname in tm._columns.keys(): @@ -49,7 +49,7 @@ def test_model_deleting_works_properly(self): """ Tests that an instance's delete method deletes the instance """ - tm = TestModel.objects.create(count=8, text='123456789') + tm = TestModel.create(count=8, text='123456789') tm.delete() tm2 = TestModel.objects(id=tm.pk).first() self.assertIsNone(tm2) @@ -57,7 +57,7 @@ def test_model_deleting_works_properly(self): def test_column_deleting_works_properly(self): """ """ - tm = TestModel.objects.create(count=8, text='123456789') + tm = TestModel.create(count=8, text='123456789') tm.text = None tm.save() From 40dda4d839cb8b2f90aedcaabe5da82d4b587f83 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 28 Nov 2012 22:03:23 -0800 Subject: [PATCH 0060/3726] adding changelog --- changelog | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 changelog diff --git a/changelog b/changelog new file mode 100644 index 0000000000..9ab74a3b7b --- /dev/null +++ b/changelog @@ -0,0 +1,10 @@ +CHANGELOG + +0.0.4-ALPHA (in progress) +* added create method shortcut to the model class + +0.0.3-ALPHA +* added queryset result caching +* added queryset array index access and slicing +* updating table name generation (more readable) + From fd39fac6aae5de612895820af1fbeab766207604 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 29 Nov 2012 21:23:03 -0800 
Subject: [PATCH 0061/3726] adding IDE stuff --- .gitignore | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.gitignore b/.gitignore index 878f8ebd3c..6290413ccc 100644 --- a/.gitignore +++ b/.gitignore @@ -16,6 +16,11 @@ develop-eggs # Installer logs pip-log.txt +# IDE +.project +.pydevproject +.settings/ + # Unit test / coverage reports .coverage .tox From ba7c8c87ee20c97ed05f3a05e12c70cf7c6a5312 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 29 Nov 2012 21:24:02 -0800 Subject: [PATCH 0062/3726] adding get method to QuerySet --- changelog | 1 + cqlengine/models.py | 5 +- cqlengine/query.py | 35 +++++++++--- cqlengine/tests/query/test_queryset.py | 76 ++++++++++++++++++++------ 4 files changed, 90 insertions(+), 27 deletions(-) diff --git a/changelog b/changelog index 9ab74a3b7b..911753f940 100644 --- a/changelog +++ b/changelog @@ -1,6 +1,7 @@ CHANGELOG 0.0.4-ALPHA (in progress) +* added .get() method to QuerySet * added create method shortcut to the model class 0.0.3-ALPHA diff --git a/cqlengine/models.py b/cqlengine/models.py index 11f7e3f64a..0c201cdb6e 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -3,7 +3,7 @@ from cqlengine import columns from cqlengine.exceptions import ModelException -from cqlengine.query import QuerySet +from cqlengine.query import QuerySet, QueryException class ModelDefinitionException(ModelException): pass @@ -11,6 +11,9 @@ class BaseModel(object): """ The base model class, don't inherit from this, inherit from Model, defined below """ + + class DoesNotExist(QueryException): pass + class MultipleObjectsReturned(QueryException): pass #table names will be generated automatically from it's model and package name #however, you can alse define them manually here diff --git a/cqlengine/query.py b/cqlengine/query.py index 74e8810cc1..c85c6c68df 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -215,13 +215,13 @@ def _select_query(self): #----Reads------ def _execute_query(self): - with 
connection_manager() as con: - self._cursor = con.execute(self._select_query(), self._where_values()) - self._result_cache = [None]*self._cursor.rowcount + if self._result_cache is None: + with connection_manager() as con: + self._cursor = con.execute(self._select_query(), self._where_values()) + self._result_cache = [None]*self._cursor.rowcount def _fill_result_cache_to_idx(self, idx): - if self._result_cache is None: - self._execute_query() + self._execute_query() if self._result_idx is None: self._result_idx = -1 @@ -236,8 +236,7 @@ def _fill_result_cache_to_idx(self, idx): self._result_cache[self._result_idx] = self._construct_instance(value_dict) def __iter__(self): - if self._result_cache is None: - self._execute_query() + self._execute_query() for idx in range(len(self._result_cache)): instance = self._result_cache[idx] @@ -246,8 +245,7 @@ def __iter__(self): yield self._result_cache[idx] def __getitem__(self, s): - if self._result_cache is None: - self._execute_query() + self._execute_query() num_results = len(self._result_cache) @@ -329,6 +327,25 @@ def filter(self, **kwargs): return clone + def get(self, **kwargs): + """ + Returns a single instance matching this query, optionally with additional filter kwargs. + + A DoesNotExistError will be raised if there are no rows matching the query + A MultipleObjectsFoundError will be raised if there is more than one row matching the queyr + """ + if kwargs: return self.filter(**kwargs).get() + self._execute_query() + if len(self._result_cache) == 0: + raise self.model.DoesNotExist + elif len(self._result_cache) > 1: + raise self.model.MultipleObjectsReturned( + '{} objects found'.format(len(self._result_cache))) + else: + return self[0] + + + def order_by(self, colname): """ orders the result set. 
diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index b90888a002..326bdc9b11 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -147,7 +147,7 @@ def tearDownClass(cls): delete_column_family(TestModel) delete_column_family(IndexedTestModel) -class TestQuerySetCountAndSelection(BaseQuerySetUsage): +class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage): def test_count(self): assert TestModel.objects.count() == 12 @@ -175,22 +175,6 @@ def test_iteration(self): compare_set.remove(val) assert len(compare_set) == 0 - def test_delete(self): - TestModel.objects.create(test_id=3, attempt_id=0, description='try9', expected_result=50, test_result=40) - TestModel.objects.create(test_id=3, attempt_id=1, description='try10', expected_result=60, test_result=40) - TestModel.objects.create(test_id=3, attempt_id=2, description='try11', expected_result=70, test_result=45) - TestModel.objects.create(test_id=3, attempt_id=3, description='try12', expected_result=75, test_result=45) - - assert TestModel.objects.count() == 16 - assert TestModel.objects(test_id=3).count() == 4 - - TestModel.objects(test_id=3).delete() - - assert TestModel.objects.count() == 12 - assert TestModel.objects(test_id=3).count() == 0 - -class TestQuerySetIterator(BaseQuerySetUsage): - def test_multiple_iterations_work_properly(self): """ Tests that iterating over a query set more than once works """ q = TestModel.objects(test_id=0) @@ -222,6 +206,41 @@ def test_multiple_iterators_are_isolated(self): assert iter1.next().attempt_id == attempt_id assert iter2.next().attempt_id == attempt_id + def test_get_success_case(self): + """ + Tests that the .get() method works on new and existing querysets + """ + m = TestModel.objects.get(test_id=0, attempt_id=0) + assert isinstance(m, TestModel) + assert m.test_id == 0 + assert m.attempt_id == 0 + + q = TestModel.objects(test_id=0, attempt_id=0) + m = q.get() + assert 
isinstance(m, TestModel) + assert m.test_id == 0 + assert m.attempt_id == 0 + + q = TestModel.objects(test_id=0) + m = q.get(attempt_id=0) + assert isinstance(m, TestModel) + assert m.test_id == 0 + assert m.attempt_id == 0 + + def test_get_doesnotexist_exception(self): + """ + Tests that get calls that don't return a result raises a DoesNotExist error + """ + with self.assertRaises(TestModel.DoesNotExist): + TestModel.objects.get(test_id=100) + + def test_get_multipleobjects_exception(self): + """ + Tests that get calls that return multiple results raise a MultipleObjectsReturned error + """ + with self.assertRaises(TestModel.MultipleObjectsReturned): + TestModel.objects.get(test_id=1) + class TestQuerySetOrdering(BaseQuerySetUsage): @@ -310,6 +329,29 @@ def test_indexed_field_can_be_queried(self): count = q.count() assert q.count() == 4 +class TestQuerySetDelete(BaseQuerySetUsage): + + def test_delete(self): + TestModel.objects.create(test_id=3, attempt_id=0, description='try9', expected_result=50, test_result=40) + TestModel.objects.create(test_id=3, attempt_id=1, description='try10', expected_result=60, test_result=40) + TestModel.objects.create(test_id=3, attempt_id=2, description='try11', expected_result=70, test_result=45) + TestModel.objects.create(test_id=3, attempt_id=3, description='try12', expected_result=75, test_result=45) + + assert TestModel.objects.count() == 16 + assert TestModel.objects(test_id=3).count() == 4 + + TestModel.objects(test_id=3).delete() + + assert TestModel.objects.count() == 12 + assert TestModel.objects(test_id=3).count() == 0 + + def test_delete_without_partition_key(self): + + pass + + def test_delete_without_any_where_args(self): + pass + From 10d753be8def8a3cd10bd5cd67ee7d0068882923 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 29 Nov 2012 21:41:24 -0800 Subject: [PATCH 0063/3726] adding partition key validation to delete method --- changelog | 1 + cqlengine/query.py | 7 +++++-- 
cqlengine/tests/query/test_queryset.py | 9 ++++++--- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/changelog b/changelog index 911753f940..6d805ec337 100644 --- a/changelog +++ b/changelog @@ -1,6 +1,7 @@ CHANGELOG 0.0.4-ALPHA (in progress) +* added partition key validation to QuerySet delete method * added .get() method to QuerySet * added create method shortcut to the model class diff --git a/cqlengine/query.py b/cqlengine/query.py index c85c6c68df..3904ae86ee 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -500,9 +500,12 @@ def delete(self, columns=[]): """ Deletes the contents of a query """ + #validate where clause + partition_key = self.model._primary_keys.values()[0] + if not any([c.column == partition_key for c in self._where]): + raise QueryException("The partition key must be defined on delete queries") qs = ['DELETE FROM {}'.format(self.column_family_name)] - if self._where: - qs += ['WHERE {}'.format(self._where_clause())] + qs += ['WHERE {}'.format(self._where_clause())] qs = ' '.join(qs) with connection_manager() as con: diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 326bdc9b11..842d978a2f 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -346,11 +346,14 @@ def test_delete(self): assert TestModel.objects(test_id=3).count() == 0 def test_delete_without_partition_key(self): - - pass + """ Tests that attempting to delete a model without defining a partition key fails """ + with self.assertRaises(query.QueryException): + TestModel.objects(attempt_id=0).delete() def test_delete_without_any_where_args(self): - pass + """ Tests that attempting to delete a whole table without any arguments will fail """ + with self.assertRaises(query.QueryException): + TestModel.objects(attempt_id=0).delete() From 1cd37d65b0162250a2b68f84304262aaa3b79e71 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 1 Dec 2012 09:43:09 -0800 Subject: 
[PATCH 0064/3726] removing foreign key stub --- cqlengine/columns.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 617b0d4b12..387aaf3d8f 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -234,9 +234,3 @@ def __init__(self, **kwargs): super(Counter, self).__init__(**kwargs) raise NotImplementedError -class ForeignKey(Column): - #TODO: Foreign key field - def __init__(self, **kwargs): - super(ForeignKey, self).__init__(**kwargs) - raise NotImplementedError - From 798686cd9d7fc570a82dff48ddbafbdb8a2b57e2 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 2 Dec 2012 10:00:38 -0800 Subject: [PATCH 0065/3726] changing the create_column_family and delete_column_family management functions to create_table and delete_table, respectively --- README.md | 4 ++-- changelog | 2 ++ cqlengine/management.py | 4 ++-- cqlengine/tests/columns/test_validation.py | 10 +++++----- cqlengine/tests/model/test_model_io.py | 8 ++++---- cqlengine/tests/query/test_queryset.py | 16 ++++++++-------- 6 files changed, 23 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index 5656f0e2ce..313003ea0e 100644 --- a/README.md +++ b/README.md @@ -36,8 +36,8 @@ pip install cqlengine >>> connection.setup(['127.0.0.1:9160']) #...and create your CQL table ->>> from cqlengine.management import create_column_family ->>> create_column_family(ExampleModel) +>>> from cqlengine.management import create_table +>>> create_table(ExampleModel) #now we can create some rows: >>> em1 = ExampleModel.create(example_type=0, description="example1") diff --git a/changelog b/changelog index 6d805ec337..4f4db4ff88 100644 --- a/changelog +++ b/changelog @@ -1,6 +1,8 @@ CHANGELOG 0.0.4-ALPHA (in progress) +* changing create_column_family management function to create_table +* changing delete_column_family management function to delete_table * added partition key validation to QuerySet delete method * added .get() method to QuerySet * 
added create method shortcut to the model class diff --git a/cqlengine/management.py b/cqlengine/management.py index de0e93a45c..481e30364d 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -12,7 +12,7 @@ def delete_keyspace(name): if name in [k.name for k in con.con.client.describe_keyspaces()]: con.execute("DROP KEYSPACE {}".format(name)) -def create_column_family(model, create_missing_keyspace=True): +def create_table(model, create_missing_keyspace=True): #construct query string cf_name = model.column_family_name() raw_cf_name = model.column_family_name(include_keyspace=False) @@ -56,7 +56,7 @@ def add_column(col): con.execute(qs) -def delete_column_family(model): +def delete_table(model): #check that model exists cf_name = model.column_family_name() raw_cf_name = model.column_family_name(include_keyspace=False) diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 3f70ab12cd..0c2b6c7da2 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -15,7 +15,7 @@ from cqlengine.columns import Float from cqlengine.columns import Decimal -from cqlengine.management import create_column_family, delete_column_family +from cqlengine.management import create_table, delete_table from cqlengine.models import Model class TestDatetime(BaseCassEngTestCase): @@ -26,12 +26,12 @@ class DatetimeTest(Model): @classmethod def setUpClass(cls): super(TestDatetime, cls).setUpClass() - create_column_family(cls.DatetimeTest) + create_table(cls.DatetimeTest) @classmethod def tearDownClass(cls): super(TestDatetime, cls).tearDownClass() - delete_column_family(cls.DatetimeTest) + delete_table(cls.DatetimeTest) def test_datetime_io(self): now = datetime.now() @@ -47,12 +47,12 @@ class DecimalTest(Model): @classmethod def setUpClass(cls): super(TestDecimal, cls).setUpClass() - create_column_family(cls.DecimalTest) + create_table(cls.DecimalTest) @classmethod def 
tearDownClass(cls): super(TestDecimal, cls).tearDownClass() - delete_column_family(cls.DecimalTest) + delete_table(cls.DecimalTest) def test_datetime_io(self): dt = self.DecimalTest.objects.create(test_id=0, dec_val=D('0.00')) diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index 85100eeb13..f65b44efd2 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -1,8 +1,8 @@ from unittest import skip from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.management import create_column_family -from cqlengine.management import delete_column_family +from cqlengine.management import create_table +from cqlengine.management import delete_table from cqlengine.models import Model from cqlengine.models import Model from cqlengine import columns @@ -16,12 +16,12 @@ class TestModelIO(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(TestModelIO, cls).setUpClass() - create_column_family(TestModel) + create_table(TestModel) @classmethod def tearDownClass(cls): super(TestModelIO, cls).tearDownClass() - delete_column_family(TestModel) + delete_table(TestModel) def test_model_save_and_load(self): """ diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 842d978a2f..ad3cb73e15 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -1,8 +1,8 @@ from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.exceptions import ModelException -from cqlengine.management import create_column_family -from cqlengine.management import delete_column_family +from cqlengine.management import create_table +from cqlengine.management import delete_table from cqlengine.models import Model from cqlengine import columns from cqlengine import query @@ -106,10 +106,10 @@ class BaseQuerySetUsage(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(BaseQuerySetUsage, cls).setUpClass() - 
delete_column_family(TestModel) - delete_column_family(IndexedTestModel) - create_column_family(TestModel) - create_column_family(IndexedTestModel) + delete_table(TestModel) + delete_table(IndexedTestModel) + create_table(TestModel) + create_table(IndexedTestModel) TestModel.objects.create(test_id=0, attempt_id=0, description='try1', expected_result=5, test_result=30) TestModel.objects.create(test_id=0, attempt_id=1, description='try2', expected_result=10, test_result=30) @@ -144,8 +144,8 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): super(BaseQuerySetUsage, cls).tearDownClass() - delete_column_family(TestModel) - delete_column_family(IndexedTestModel) + delete_table(TestModel) + delete_table(IndexedTestModel) class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage): From 112da2a6a89ea2a920fe4faa189c2b20bb575c26 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 2 Dec 2012 10:44:49 -0800 Subject: [PATCH 0066/3726] Adding sphinx documentation --- .gitignore | 3 + changelog | 1 + docs/Makefile | 153 +++++++++++++++++++++ docs/conf.py | 242 +++++++++++++++++++++++++++++++++ docs/index.rst | 100 ++++++++++++++ docs/make.bat | 190 ++++++++++++++++++++++++++ docs/topics/columns.rst | 100 ++++++++++++++ docs/topics/connection.rst | 19 +++ docs/topics/manage_schemas.rst | 43 ++++++ docs/topics/models.rst | 136 ++++++++++++++++++ docs/topics/queryset.rst | 225 ++++++++++++++++++++++++++++++ requirements.txt | 1 + 12 files changed, 1213 insertions(+) create mode 100644 docs/Makefile create mode 100644 docs/conf.py create mode 100644 docs/index.rst create mode 100644 docs/make.bat create mode 100644 docs/topics/columns.rst create mode 100644 docs/topics/connection.rst create mode 100644 docs/topics/manage_schemas.rst create mode 100644 docs/topics/models.rst create mode 100644 docs/topics/queryset.rst diff --git a/.gitignore b/.gitignore index 6290413ccc..bd169dfade 100644 --- a/.gitignore +++ b/.gitignore @@ -16,6 +16,9 @@ develop-eggs # 
Installer logs pip-log.txt +# Docs +html/ + # IDE .project .pydevproject diff --git a/changelog b/changelog index 4f4db4ff88..a75b17c221 100644 --- a/changelog +++ b/changelog @@ -1,6 +1,7 @@ CHANGELOG 0.0.4-ALPHA (in progress) +* added Sphinx docs * changing create_column_family management function to create_table * changing delete_column_family management function to delete_table * added partition key validation to QuerySet delete method diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000000..ca198684e1 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,153 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
+ +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." 
+ +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/cqlengine.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/cqlengine.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/cqlengine" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/cqlengine" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 
+ +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000000..e41747b73e --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,242 @@ +# -*- coding: utf-8 -*- +# +# cqlengine documentation build configuration file, created by +# sphinx-quickstart on Sat Dec 1 09:50:49 2012. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. 
If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.mathjax'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'cqlengine' +copyright = u'2012, Blake Eggleston' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '0.0.3' +# The full version, including alpha/beta/rc tags. +release = '0.0.3' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. 
+#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. 
+#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'cqlenginedoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +#'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). 
+latex_documents = [ + ('index', 'cqlengine.tex', u'cqlengine Documentation', + u'Blake Eggleston', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output -------------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'cqlengine', u'cqlengine Documentation', + [u'Blake Eggleston'], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------------ + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'cqlengine', u'cqlengine Documentation', + u'Blake Eggleston', 'cqlengine', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000000..2559a1006e --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,100 @@ + +.. 
_index: + +======================= +cqlengine documentation +======================= + +cqlengine is a Cassandra CQL ORM for Python with an interface similar to the Django orm and mongoengine + +:ref:`getting-started` + +Contents: + +.. toctree:: + :maxdepth: 2 + + topics/models + topics/queryset + topics/columns + topics/connection + topics/manage_schemas + +.. _getting-started: + +Getting Started +=============== + + .. code-block:: python + + #first, define a model + >>> from cqlengine import columns + >>> from cqlengine import Model + + >>> class ExampleModel(Model): + >>> example_id = columns.UUID(primary_key=True) + >>> example_type = columns.Integer(index=True) + >>> created_at = columns.DateTime() + >>> description = columns.Text(required=False) + + #next, setup the connection to your cassandra server(s)... + >>> from cqlengine import connection + >>> connection.setup(['127.0.0.1:9160']) + + #...and create your CQL table + >>> from cqlengine.management import create_table + >>> create_table(ExampleModel) + + #now we can create some rows: + >>> em1 = ExampleModel.create(example_type=0, description="example1") + >>> em2 = ExampleModel.create(example_type=0, description="example2") + >>> em3 = ExampleModel.create(example_type=0, description="example3") + >>> em4 = ExampleModel.create(example_type=0, description="example4") + >>> em5 = ExampleModel.create(example_type=1, description="example5") + >>> em6 = ExampleModel.create(example_type=1, description="example6") + >>> em7 = ExampleModel.create(example_type=1, description="example7") + >>> em8 = ExampleModel.create(example_type=1, description="example8") + # Note: the UUID and DateTime columns will create uuid4 and datetime.now + # values automatically if we don't specify them when creating new rows + + #and now we can run some queries against our table + >>> ExampleModel.objects.count() + 8 + >>> q = ExampleModel.objects(example_type=1) + >>> q.count() + 4 + >>> for instance in q: + >>> print q.description + 
example5 + example6 + example7 + example8 + + #here we are applying additional filtering to an existing query + #query objects are immutable, so calling filter returns a new + #query object + >>> q2 = q.filter(example_id=em5.example_id) + + >>> q2.count() + 1 + >>> for instance in q2: + >>> print q.description + example5 + + +`Report a Bug `_ + +`Users Mailing List `_ + +`Dev Mailing List `_ + +**NOTE: cqlengine is in alpha and under development, some features may change. Make sure to check the changelog and test your app before upgrading** + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 0000000000..6be2277f78 --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,190 @@ +@ECHO OFF + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set BUILDDIR=_build +set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . +set I18NSPHINXOPTS=%SPHINXOPTS% . +if NOT "%PAPER%" == "" ( + set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% + set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% +) + +if "%1" == "" goto help + +if "%1" == "help" ( + :help + echo.Please use `make ^` where ^ is one of + echo. html to make standalone HTML files + echo. dirhtml to make HTML files named index.html in directories + echo. singlehtml to make a single large HTML file + echo. pickle to make pickle files + echo. json to make JSON files + echo. htmlhelp to make HTML files and a HTML help project + echo. qthelp to make HTML files and a qthelp project + echo. devhelp to make HTML files and a Devhelp project + echo. epub to make an epub + echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter + echo. text to make text files + echo. man to make manual pages + echo. texinfo to make Texinfo files + echo. gettext to make PO message catalogs + echo. 
changes to make an overview over all changed/added/deprecated items + echo. linkcheck to check all external links for integrity + echo. doctest to run all doctests embedded in the documentation if enabled + goto end +) + +if "%1" == "clean" ( + for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i + del /q /s %BUILDDIR%\* + goto end +) + +if "%1" == "html" ( + %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/html. + goto end +) + +if "%1" == "dirhtml" ( + %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. + goto end +) + +if "%1" == "singlehtml" ( + %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. + goto end +) + +if "%1" == "pickle" ( + %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the pickle files. + goto end +) + +if "%1" == "json" ( + %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the JSON files. + goto end +) + +if "%1" == "htmlhelp" ( + %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can run HTML Help Workshop with the ^ +.hhp project file in %BUILDDIR%/htmlhelp. + goto end +) + +if "%1" == "qthelp" ( + %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp + if errorlevel 1 exit /b 1 + echo. 
+ echo.Build finished; now you can run "qcollectiongenerator" with the ^ +.qhcp project file in %BUILDDIR%/qthelp, like this: + echo.^> qcollectiongenerator %BUILDDIR%\qthelp\cqlengine.qhcp + echo.To view the help file: + echo.^> assistant -collectionFile %BUILDDIR%\qthelp\cqlengine.ghc + goto end +) + +if "%1" == "devhelp" ( + %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. + goto end +) + +if "%1" == "epub" ( + %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The epub file is in %BUILDDIR%/epub. + goto end +) + +if "%1" == "latex" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "text" ( + %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The text files are in %BUILDDIR%/text. + goto end +) + +if "%1" == "man" ( + %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The manual pages are in %BUILDDIR%/man. + goto end +) + +if "%1" == "texinfo" ( + %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. + goto end +) + +if "%1" == "gettext" ( + %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The message catalogs are in %BUILDDIR%/locale. + goto end +) + +if "%1" == "changes" ( + %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes + if errorlevel 1 exit /b 1 + echo. + echo.The overview file is in %BUILDDIR%/changes. + goto end +) + +if "%1" == "linkcheck" ( + %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck + if errorlevel 1 exit /b 1 + echo. 
+ echo.Link check complete; look for any errors in the above output ^ +or in %BUILDDIR%/linkcheck/output.txt. + goto end +) + +if "%1" == "doctest" ( + %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest + if errorlevel 1 exit /b 1 + echo. + echo.Testing of doctests in the sources finished, look at the ^ +results in %BUILDDIR%/doctest/output.txt. + goto end +) + +:end diff --git a/docs/topics/columns.rst b/docs/topics/columns.rst new file mode 100644 index 0000000000..dc87f8cbdf --- /dev/null +++ b/docs/topics/columns.rst @@ -0,0 +1,100 @@ +======= +Columns +======= + +.. module:: cqlengine.columns + +.. class:: Bytes() + + Stores arbitrary bytes (no validation), expressed as hexadecimal :: + + columns.Bytes() + + +.. class:: Ascii() + + Stores a US-ASCII character string :: + + columns.Ascii() + + +.. class:: Text() + + Stores a UTF-8 encoded string :: + + columns.Text() + +.. class:: Integer() + + Stores an integer value :: + + columns.Integer() + +.. class:: DateTime() + + Stores a datetime value. + + Python's datetime.now callable is set as the default value for this column :: + + columns.DateTime() + +.. class:: UUID() + + Stores a type 1 or type 4 UUID. + + Python's uuid.uuid4 callable is set as the default value for this column. :: + + columns.UUID() + +.. class:: Boolean() + + Stores a boolean True or False value :: + + columns.Boolean() + +.. class:: Float() + + Stores a floating point value :: + + columns.Float() + + **options** + + :attr:`~columns.Float.double_precision` + If True, stores a double precision float value, otherwise single precision. Defaults to True. + +.. class:: Decimal() + + Stores a variable precision decimal value :: + + columns.Decimal() + +Column Options +============== + + Each column can be defined with optional arguments to modify the way they behave. While some column types may define additional column options, these are the options that are available on all columns: + + .. 
attribute:: BaseColumn.primary_key + + If True, this column is created as a primary key field. A model can have multiple primary keys. Defaults to False. + + *In CQL, there are 2 types of primary keys: partition keys and clustering keys. As with CQL, the first primary key is the partition key, and all others are clustering keys.* + + .. attribute:: BaseColumn.index + + If True, an index will be created for this column. Defaults to False. + + *Note: Indexes can only be created on models with one primary key* + + .. attribute:: BaseColumn.db_field + + Explicitly sets the name of the column in the database table. If this is left blank, the column name will be the same as the name of the column attribute. Defaults to None. + + .. attribute:: BaseColumn.default + + The default value for this column. If a model instance is saved without a value for this column having been defined, the default value will be used. This can be either a value or a callable object (ie: datetime.now is a valid default argument). + + .. attribute:: BaseColumn.required + + If True, this model cannot be saved without a value defined for this column. Defaults to True. Primary key fields cannot have their required fields set to False. + diff --git a/docs/topics/connection.rst b/docs/topics/connection.rst new file mode 100644 index 0000000000..14bc2341c3 --- /dev/null +++ b/docs/topics/connection.rst @@ -0,0 +1,19 @@ +============== +Connection +============== + +.. module:: cqlengine.connection + +The setup function in `cqlengine.connection` records the Cassandra servers to connect to. +If there is a problem with one of the servers, cqlengine will try to connect to each of the other connections before failing. + +.. 
function:: setup(hosts [, username=None, password=None]) + + :param hosts: list of hosts, strings in the :, or just + :type hosts: list + + Records the hosts and connects to one of them + +See the example at :ref:`getting-started` + + diff --git a/docs/topics/manage_schemas.rst b/docs/topics/manage_schemas.rst new file mode 100644 index 0000000000..2174e56f6a --- /dev/null +++ b/docs/topics/manage_schemas.rst @@ -0,0 +1,43 @@ +=============== +Managing Schmas +=============== + +.. module:: cqlengine.management + +Once a connection has been made to Cassandra, you can use the functions in ``cqlengine.management`` to create and delete keyspaces, as well as create and delete tables for defined models + +.. function:: create_keyspace(name) + + :param name: the keyspace name to create + :type name: string + + creates a keyspace with the given name + +.. function:: delete_keyspace(name) + + :param name: the keyspace name to delete + :type name: string + + deletes the keyspace with the given name + +.. function:: create_table(model [, create_missing_keyspace=True]) + + :param model: the :class:`~cqlengine.model.Model` class to make a table with + :type model: :class:`~cqlengine.model.Model` + :param create_missing_keyspace: *Optional* If True, the model's keyspace will be created if it does not already exist. Defaults to ``True`` + :type create_missing_keyspace: bool + + creates a CQL table for the given model + +.. function:: delete_table(model) + + :param model: the :class:`~cqlengine.model.Model` class to delete a column family for + :type model: :class:`~cqlengine.model.Model` + + deletes the CQL table for the given model + + + +See the example at :ref:`getting-started` + + diff --git a/docs/topics/models.rst b/docs/topics/models.rst new file mode 100644 index 0000000000..3ddefc37e4 --- /dev/null +++ b/docs/topics/models.rst @@ -0,0 +1,136 @@ +====== +Models +====== + +.. module:: cqlengine.models + +A model is a python class representing a CQL table. 
+ +Example +======= + +This example defines a Person table, with the columns ``first_name`` and ``last_name`` + +.. code-block:: python + + from cqlengine import columns + from cqlengine.models import Model + + class Person(Model): + first_name = columns.Text() + last_name = columns.Text() + + +The Person model would create this CQL table: + +.. code-block:: sql + + CREATE TABLE cqlengine.person ( + id uuid, + first_name text, + last_name text, + PRIMARY KEY (id) + ) + +Columns +======= + + Columns in your models map to columns in your CQL table. You define CQL columns by defining column attributes on your model classes. For a model to be valid it needs at least one primary key column (defined automatically if you don't define one) and one non-primary key column. + + Just as in CQL, the order you define your columns in is important, and is the same order they are defined in on a model's corresponding table. + +Column Types +============ + + Each column on your model definitions needs to an instance of a Column class. The column types that are included with cqlengine as of this writing are: + + * :class:`~cqlengine.columns.Bytes` + * :class:`~cqlengine.columns.Ascii` + * :class:`~cqlengine.columns.Text` + * :class:`~cqlengine.columns.Integer` + * :class:`~cqlengine.columns.DateTime` + * :class:`~cqlengine.columns.UUID` + * :class:`~cqlengine.columns.Boolean` + * :class:`~cqlengine.columns.Float` + * :class:`~cqlengine.columns.Decimal` + + A time uuid field is in the works. + +Column Options +-------------- + + Each column can be defined with optional arguments to modify the way they behave. While some column types may define additional column options, these are the options that are available on all columns: + + :attr:`~cqlengine.columns.BaseColumn.primary_key` + If True, this column is created as a primary key field. A model can have multiple primary keys. Defaults to False. + + *In CQL, there are 2 types of primary keys: partition keys and clustering keys. 
As with CQL, the first primary key is the partition key, and all others are clustering keys.* + + :attr:`~cqlengine.columns.BaseColumn.index` + If True, an index will be created for this column. Defaults to False. + + *Note: Indexes can only be created on models with one primary key* + + :attr:`~cqlengine.columns.BaseColumn.db_field` + Explicitly sets the name of the column in the database table. If this is left blank, the column name will be the same as the name of the column attribute. Defaults to None. + + :attr:`~cqlengine.columns.BaseColumn.default` + The default value for this column. If a model instance is saved without a value for this column having been defined, the default value will be used. This can be either a value or a callable object (ie: datetime.now is a valid default argument). + + :attr:`~cqlengine.columns.BaseColumn.required` + If True, this model cannot be saved without a value defined for this column. Defaults to True. Primary key fields cannot have their required fields set to False. + +Model Methods +============= + Below are the methods that can be called on model instances. + +.. class:: Model(\*\*values) + + Creates an instance of the model. Pass in keyword arguments for columns you've defined on the model. + + *Example* + + .. code-block:: python + + #using the person model from earlier: + class Person(Model): + first_name = columns.Text() + last_name = columns.Text() + + person = Person(first_name='Blake', last_name='Eggleston') + person.first_name #returns 'Blake' + person.last_name #returns 'Eggleston' + + + .. method:: save() + + Saves an object to the database + + *Example* + + .. code-block:: python + + #create a person instance + person = Person(first_name='Kimberly', last_name='Eggleston') + #saves it to Cassandra + person.save() + + + .. method:: delete() + + Deletes the object from the database. + +Model Attributes +================ + + .. attribute:: Model.db_name + + *Optional.* Sets the name of the CQL table for this model. 
If left blank, the table name will be the name of the model, with it's module name as it's prefix + + .. attribute:: Model.keyspace + + *Optional.* Sets the name of the keyspace used by this model. Defaults to cqlengine + +Automatic Primary Keys +====================== + CQL requires that all tables define at least one primary key. If a model definition does not include a primary key column, cqlengine will automatically add a uuid primary key column named ``id``. diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst new file mode 100644 index 0000000000..63b76d03a6 --- /dev/null +++ b/docs/topics/queryset.rst @@ -0,0 +1,225 @@ +============== +Making Queries +============== + +.. module:: cqlengine.query + +Retrieving objects +================== + Once you've populated Cassandra with data, you'll probably want to retrieve some of it. This is accomplished with QuerySet objects. This section will describe how to use QuerySet objects to retrieve the data you're looking for. + +Retrieving all objects +---------------------- + The simplest query you can make is to return all objects from a table. + + This is accomplished with the ``.all()`` method, which returns a QuerySet of all objects in a table + + Using the Person example model, we would get all Person objects like this: + + .. code-block:: python + + all_objects = Person.objects.all() + +.. _retrieving-objects-with-filters: + +Retrieving objects with filters +---------------------------------------- + Typically, you'll want to query only a subset of the records in your database. + + That can be accomplished with the QuerySet's ``.filter(\*\*)`` method. + + For example, given the model definition: + + ..
code-block:: python + + class Automobile(Model): + manufacturer = columns.Text(primary_key=True) + year = columns.Integer(primary_key=True) + model = columns.Text() + price = columns.Decimal() + + ...and assuming the Automobile table contains a record of every car model manufactured in the last 20 years or so, we can retrieve only the cars made by a single manufacturer like this: + + + .. code-block:: python + + q = Automobile.objects.filter(manufacturer='Tesla') + + We can then further filter our query with another call to **.filter** + + .. code-block:: python + + q = q.filter(year=2012) + + *Note: all queries involving any filtering MUST define either an '=' or an 'in' relation to either a primary key column, or an indexed column.* + +Accessing objects in a QuerySet +=============================== + + There are several methods for getting objects out of a queryset + + * iterating over the queryset + .. code-block:: python + + for car in Automobile.objects.all(): + #...do something to the car instance + pass + + * list index + .. code-block:: python + + q = Automobile.objects.all() + q[0] #returns the first result + q[1] #returns the second result + + + * list slicing + .. code-block:: python + + q = Automobile.objects.all() + q[1:] #returns all results except the first + q[1:9] #returns a slice of the results + + *Note: CQL does not support specifying a start position in its queries. Therefore, accessing elements using array indexing / slicing will load every result up to the index value requested* + + * calling :attr:`get() ` on the queryset + .. code-block:: python + + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year=2012) + car = q.get() + + this returns the object matching the queryset + + * calling :attr:`first() ` on the queryset + .. code-block:: python + + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year=2012) + car = q.first() + + this returns the first value in the queryset + +..
_query-filtering-operators: + +Filtering Operators +=================== + + :attr:`Equal To ` + + The default filtering operator. + + .. code-block:: python + + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year=2012) #year == 2012 + + In addition to simple equal to queries, cqlengine also supports querying with other operators by appending a ``__`` to the field name on the filtering call + + :attr:`in (__in) ` + + .. code-block:: python + + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year__in=[2011, 2012]) + + :attr:`> (__gt) ` + + .. code-block:: python + + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year__gt=2010) # year > 2010 + + :attr:`>= (__gte) ` + + .. code-block:: python + + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year__gte=2010) # year >= 2010 + + :attr:`< (__lt) ` + + .. code-block:: python + + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year__lt=2012) # year < 2012 + + :attr:`<= (__lte) ` + + .. code-block:: python + + q = Automobile.objects.filter(manufacturer='Tesla') + q = q.filter(year__lte=2012) # year <= 2012 + +QuerySets are immutable +======================= + + When calling any method that changes a queryset, the method does not actually change the queryset object it's called on, but returns a new queryset object with the attributes of the original queryset, plus the attributes added in the method call. + + *Example* + + .. code-block:: python + + #this produces 3 different querysets + #q does not change after its initial definition + q = Automobiles.objects.filter(year=2012) + tesla2012 = q.filter(manufacturer='Tesla') + honda2012 = q.filter(manufacturer='Honda') + +Ordering QuerySets +================== + + Since Cassandra is essentially a distributed hash table on steroids, the order you get records back in will not be particularly predictable.
+ + However, you can set a column to order on with the ``.order_by(column_name)`` method. + + *Example* + + .. code-block:: python + + #sort ascending + q = Automobiles.objects.all().order_by('year') + #sort descending + q = Automobiles.objects.all().order_by('-year') + + *Note: Cassandra only supports ordering on a clustering key. In other words, to support ordering results, your model must have more than one primary key, and you must order on a primary key, excluding the first one.* + + *For instance, given our Automobile model, year is the only column we can order on.* + +QuerySet method reference +========================= + +.. class:: QuerySet + + .. method:: all() + + Returns a queryset matching all rows + + .. method:: count() + + Returns the number of matching rows in your QuerySet + + .. method:: filter(\*\*values) + + :param values: See :ref:`retrieving-objects-with-filters` + + Returns a QuerySet filtered on the keyword arguments + + .. method:: get(\*\*values) + + :param values: See :ref:`retrieving-objects-with-filters` + + Returns a single object matching the QuerySet. If no objects are matched, a :attr:`~models.Model.DoesNotExist` exception is raised. If more than one object is found, a :attr:`~models.Model.MultipleObjectsReturned` exception is raised. + + .. method:: limit(num) + + Limits the number of results returned by Cassandra. + + *Note that CQL's default limit is 10,000, so all queries without a limit set explicitly will have an implicit limit of 10,000* + + .. method:: order_by(field_name) + + :param field_name: the name of the field to order on. *Note: the field_name must be a clustering key* + :type field_name: string + + Sets the field to order on. 
diff --git a/requirements.txt b/requirements.txt index 8411d39556..a16d2a9cc7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ cql==1.2.0 ipython==0.13.1 ipdb==0.7 +Sphinx==1.1.3 From db3a7751fb08f2041219d9900bc7ad3e73f8cd35 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 2 Dec 2012 10:51:42 -0800 Subject: [PATCH 0067/3726] updating documentation link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 313003ea0e..4eceed1fb6 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ cqlengine cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine -[Documentation](https://github.com/bdeggleston/cqlengine/wiki/Documentation) +[Documentation](https://cqlengine.readthedocs.org/en/latest/) [Report a Bug](https://github.com/bdeggleston/cqlengine/issues) From 7afe9300ff37c788536434fd966c274e6611700f Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 2 Dec 2012 11:02:09 -0800 Subject: [PATCH 0068/3726] updating version# --- changelog | 2 +- cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- setup.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/changelog b/changelog index a75b17c221..652dda1d7c 100644 --- a/changelog +++ b/changelog @@ -1,6 +1,6 @@ CHANGELOG -0.0.4-ALPHA (in progress) +0.0.4-ALPHA * added Sphinx docs * changing create_column_family management function to create_table * changing delete_column_family management function to delete_table diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 90414d015b..8fed278532 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,5 +1,5 @@ from cqlengine.columns import * from cqlengine.models import Model -__version__ = '0.0.3-ALPHA' +__version__ = '0.0.4-ALPHA' diff --git a/docs/conf.py b/docs/conf.py index e41747b73e..e15b8f3ec1 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. 
-version = '0.0.3' +version = '0.0.4' # The full version, including alpha/beta/rc tags. -release = '0.0.3' +release = '0.0.4-ALPHA' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index 8b7adfe59c..de11c701bf 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.0.3-ALPHA' +version = '0.0.4-ALPHA' long_desc = """ cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine From 192f1914b3ddfe4ca647908481afdf977ea6fb79 Mon Sep 17 00:00:00 2001 From: Eric Scrivner Date: Mon, 3 Dec 2012 20:22:42 -0800 Subject: [PATCH 0069/3726] Add Connection Pooling --- cqlengine/connection.py | 88 +++++++++++++++++-- cqlengine/tests/management/test_management.py | 37 ++++++++ requirements.txt | 1 + 3 files changed, 121 insertions(+), 5 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 65147f6ac9..52063d1b51 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -3,6 +3,7 @@ #http://cassandra.apache.org/doc/cql/CQL.html from collections import namedtuple +import Queue import random import cql @@ -20,6 +21,7 @@ class CQLConnectionError(CQLEngineException): pass _conn= None _username = None _password = None +_max_connections = 10 def _set_conn(host): """ @@ -28,7 +30,7 @@ def _set_conn(host): _conn = cql.connect(host.name, host.port, user=_username, password=_password) _conn.set_cql_version('3.0.0') -def setup(hosts, username=None, password=None): +def setup(hosts, username=None, password=None, max_connections=10): """ Records the hosts and connects to one of them @@ -37,10 +39,11 @@ def setup(hosts, username=None, password=None): global _hosts global _username global _password - + global _max_connections _username = username _password = password - + _max_connections = max_connections + for host in hosts: host = host.strip() host = host.split(':') 
@@ -57,7 +60,82 @@ def setup(hosts, username=None, password=None): random.shuffle(_hosts) host = _hosts[_host_idx] _set_conn(host) + + +class ConnectionPool(object): + """Handles pooling of database connections.""" + + # Connection pool queue + _queue = None + + @classmethod + def clear(cls): + """ + Force the connection pool to be cleared. Will close all internal + connections. + """ + try: + while not cls._queue.empty(): + cls._queue.get().close() + except: + pass + @classmethod + def get(cls): + """ + Returns a usable database connection. Uses the internal queue to + determine whether to return an existing connection or to create + a new one. + """ + try: + if cls._queue.empty(): + return cls._create_connection() + return cls._queue.get() + except CQLConnectionError as cqle: + raise cqle + except: + if not cls._queue: + cls._queue = Queue.Queue(maxsize=_max_connections) + return cls._create_connection() + + @classmethod + def put(cls, conn): + """ + Returns a connection to the queue freeing it up for other queries to + use. + + :param conn: The connection to be released + :type conn: connection + """ + try: + if cls._queue.full(): + conn.close() + else: + cls._queue.put(conn) + except: + if not cls._queue: + cls._queue = Queue.Queue(maxsize=_max_connections) + cls._queue.put(conn) + + @classmethod + def _create_connection(cls): + """ + Creates a new connection for the connection pool. 
+ """ + global _hosts + global _username + global _password + + if not _hosts: + raise CQLConnectionError("At least one host required") + + random.shuffle(_hosts) + host = _hosts[_host_idx] + + new_conn = cql.connect(host.name, host.port, user=_username, password=_password) + new_conn.set_cql_version('3.0.0') + return new_conn + class connection_manager(object): """ @@ -67,13 +145,13 @@ def __init__(self): if not _hosts: raise CQLConnectionError("No connections have been configured, call cqlengine.connection.setup") self.keyspace = None - self.con = _conn + self.con = ConnectionPool.get() def __enter__(self): return self def __exit__(self, type, value, traceback): - pass + ConnectionPool.put(self.con) def execute(self, query, params={}): """ diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index e69de29bb2..e491d615a0 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -0,0 +1,37 @@ +from cqlengine.tests.base import BaseCassEngTestCase + +from cqlengine.connection import ConnectionPool + +from mock import Mock + + +class ConnectionPoolTestCase(BaseCassEngTestCase): + """Test cassandra connection pooling.""" + + def setUp(self): + ConnectionPool.clear() + + def test_should_create_single_connection_on_request(self): + """Should create a single connection on first request""" + result = ConnectionPool.get() + self.assertIsNotNone(result) + self.assertEquals(0, ConnectionPool._queue.qsize()) + ConnectionPool._queue.put(result) + self.assertEquals(1, ConnectionPool._queue.qsize()) + + def test_should_close_connection_if_queue_is_full(self): + """Should close additional connections if queue is full""" + connections = [ConnectionPool.get() for x in range(10)] + for conn in connections: + ConnectionPool.put(conn) + fake_conn = Mock() + ConnectionPool.put(fake_conn) + fake_conn.close.assert_called_once_with() + + def 
test_should_pop_connections_from_queue(self): + """Should pull existing connections off of the queue""" + conn = ConnectionPool.get() + ConnectionPool.put(conn) + self.assertEquals(1, ConnectionPool._queue.qsize()) + self.assertEquals(conn, ConnectionPool.get()) + self.assertEquals(0, ConnectionPool._queue.qsize()) diff --git a/requirements.txt b/requirements.txt index a16d2a9cc7..2415255265 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,3 +2,4 @@ cql==1.2.0 ipython==0.13.1 ipdb==0.7 Sphinx==1.1.3 +mock==1.0.1 From 5118c36de91f424a1b63839020fa6cab88734818 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 4 Dec 2012 22:29:50 -0800 Subject: [PATCH 0070/3726] expanding the connection manager a bit --- cqlengine/connection.py | 31 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 52063d1b51..8e1f70709d 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -23,13 +23,6 @@ class CQLConnectionError(CQLEngineException): pass _password = None _max_connections = 10 -def _set_conn(host): - """ - """ - global _conn - _conn = cql.connect(host.name, host.port, user=_username, password=_password) - _conn.set_cql_version('3.0.0') - def setup(hosts, username=None, password=None, max_connections=10): """ Records the hosts and connects to one of them @@ -58,8 +51,9 @@ def setup(hosts, username=None, password=None, max_connections=10): raise CQLConnectionError("At least one host required") random.shuffle(_hosts) - host = _hosts[_host_idx] - _set_conn(host) + + con = ConnectionPool.get() + ConnectionPool.put(con) class ConnectionPool(object): @@ -129,7 +123,6 @@ def _create_connection(cls): if not _hosts: raise CQLConnectionError("At least one host required") - random.shuffle(_hosts) host = _hosts[_host_idx] new_conn = cql.connect(host.name, host.port, user=_username, password=_password) @@ -146,12 +139,17 @@ def __init__(self): raise 
CQLConnectionError("No connections have been configured, call cqlengine.connection.setup") self.keyspace = None self.con = ConnectionPool.get() + self.cur = None + + def close(self): + if self.cur: self.cur.close() + ConnectionPool.put(self.con) def __enter__(self): return self def __exit__(self, type, value, traceback): - ConnectionPool.put(self.con) + self.close() def execute(self, query, params={}): """ @@ -164,18 +162,17 @@ def execute(self, query, params={}): for i in range(len(_hosts)): try: - cur = self.con.cursor() - cur.execute(query, params) - return cur + self.cur = self.con.cursor() + self.cur.execute(query, params) + return self.cur except TTransportException: #TODO: check for other errors raised in the event of a connection / server problem #move to the next connection and set the connection pool self.con = None _host_idx += 1 _host_idx %= len(_hosts) - host = _hosts[_host_idx] - _set_conn(host) - self.con = _conn + self.con.close() + self.con = ConnectionPool._create_connection() raise CQLConnectionError("couldn't reach a Cassandra server") From 43cbca139d53067a0884aea4f669bc38967726cb Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 4 Dec 2012 22:30:07 -0800 Subject: [PATCH 0071/3726] adding connection pool support to the queryset --- cqlengine/query.py | 27 +++++++++++++++++------- cqlengine/tests/query/test_queryset.py | 29 ++++++++++++++++++++++++-- 2 files changed, 47 insertions(+), 9 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 3904ae86ee..e75f563382 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -138,7 +138,8 @@ def __init__(self, model): self._only_fields = [] #results cache - self._cursor = None + self._con = None + self._cur = None self._result_cache = None self._result_idx = None @@ -154,7 +155,7 @@ def __call__(self, **kwargs): def __deepcopy__(self, memo): clone = self.__class__(self.model) for k,v in self.__dict__.items(): - if k in ['_cursor', '_result_cache', '_result_idx']: + if k in 
['_con', '_cur', '_result_cache', '_result_idx']: clone.__dict__[k] = None else: clone.__dict__[k] = copy.deepcopy(v, memo) @@ -163,6 +164,12 @@ def __deepcopy__(self, memo): def __len__(self): return self.count() + + def __del__(self): + if self._con: + self._con.close() + self._con = None + self._cur = None #----query generation / execution---- @@ -216,24 +223,30 @@ def _select_query(self): def _execute_query(self): if self._result_cache is None: - with connection_manager() as con: - self._cursor = con.execute(self._select_query(), self._where_values()) - self._result_cache = [None]*self._cursor.rowcount + self._con = connection_manager() + self._cur = self._con.execute(self._select_query(), self._where_values()) + self._result_cache = [None]*self._cur.rowcount def _fill_result_cache_to_idx(self, idx): self._execute_query() if self._result_idx is None: self._result_idx = -1 - names = [i[0] for i in self._cursor.description] qty = idx - self._result_idx if qty < 1: return else: - for values in self._cursor.fetchmany(qty): + names = [i[0] for i in self._cur.description] + for values in self._cur.fetchmany(qty): value_dict = dict(zip(names, values)) self._result_idx += 1 self._result_cache[self._result_idx] = self._construct_instance(value_dict) + + #return the connection to the connection pool if we have all objects + if self._result_cache and self._result_cache[-1] is not None: + self._con.close() + self._con = None + self._cur = None def __iter__(self): self._execute_query() diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index ad3cb73e15..31178b16c5 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -355,8 +355,33 @@ def test_delete_without_any_where_args(self): with self.assertRaises(query.QueryException): TestModel.objects(attempt_id=0).delete() - - +class TestQuerySetConnectionHandling(BaseQuerySetUsage): + + def test_conn_is_returned_after_filling_cache(self): + """ 
+ Tests that the queryset returns it's connection after it's fetched all of it's results + """ + q = TestModel.objects(test_id=0) + #tuple of expected attempt_id, expected_result values + compare_set = set([(0,5), (1,10), (2,15), (3,20)]) + for t in q: + val = t.attempt_id, t.expected_result + assert val in compare_set + compare_set.remove(val) + + assert q._con is None + assert q._cur is None + + def test_conn_is_returned_after_queryset_is_garbage_collected(self): + """ Tests that the connection is returned to the connection pool after the queryset is gc'd """ + from cqlengine.connection import ConnectionPool + assert ConnectionPool._queue.qsize() == 1 + q = TestModel.objects(test_id=0) + v = q[0] + assert ConnectionPool._queue.qsize() == 0 + + del q + assert ConnectionPool._queue.qsize() == 1 From eb28b92dcf455307b9eb4fcb85ddd945443244be Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 4 Dec 2012 22:46:44 -0800 Subject: [PATCH 0072/3726] adding convenience filter methods --- cqlengine/models.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/cqlengine/models.py b/cqlengine/models.py index 0c201cdb6e..6e75c9acc3 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -75,6 +75,18 @@ def as_dict(self): @classmethod def create(cls, **kwargs): return cls.objects.create(**kwargs) + + @classmethod + def all(cls): + return cls.objects.all() + + @classmethod + def filter(cls, **kwargs): + return cls.objects.filter(**kwargs) + + @classmethod + def get(cls, **kwargs): + return cls.objects.get(**kwargs) def save(self): is_new = self.pk is None From 1935bd1a0b5cca3f010d2c0a07a8de3bd2602b3f Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 4 Dec 2012 22:47:47 -0800 Subject: [PATCH 0073/3726] updating version, changelog, etc --- AUTHORS | 3 ++- changelog | 4 ++++ cqlengine/__init__.py | 2 +- setup.py | 2 +- 4 files changed, 8 insertions(+), 3 deletions(-) diff --git a/AUTHORS b/AUTHORS index 3a69691a3e..b361bf0de4 100644 --- a/AUTHORS +++ 
b/AUTHORS @@ -4,5 +4,6 @@ Blake Eggleston CONTRIBUTORS -Eric Scrivner +Eric Scrivner - test environment, connection pooling +Jon Haddad - helped hash out some of the architecture diff --git a/changelog b/changelog index 652dda1d7c..d1d9469f68 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,9 @@ CHANGELOG +0.0.5 +* added connection pooling +* adding a few convenience query classmethods to the model class + 0.0.4-ALPHA * added Sphinx docs * changing create_column_family management function to create_table diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 8fed278532..f2c0cbcb0a 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,5 +1,5 @@ from cqlengine.columns import * from cqlengine.models import Model -__version__ = '0.0.4-ALPHA' +__version__ = '0.0.5' diff --git a/setup.py b/setup.py index de11c701bf..4471f9c807 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.0.4-ALPHA' +version = '0.0.5' long_desc = """ cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine From 57359a87ad3a6a4609b8017b75bfa7ef0be0b975 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 5 Dec 2012 11:16:01 -0800 Subject: [PATCH 0074/3726] added timeuuid column --- cqlengine/columns.py | 11 ++++++++ cqlengine/tests/columns/test_validation.py | 33 +++++++++++++++++++++- 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 387aaf3d8f..b276b5d75b 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -196,6 +196,17 @@ def validate(self, value): if not self.re_uuid.match(val): raise ValidationError("{} is not a valid uuid".format(value)) return _UUID(val) + +class TimeUUID(UUID): + """ + UUID containing timestamp + """ + + db_type = 'timeuuid' + + def __init__(self, **kwargs): + kwargs.setdefault('default', lambda: uuid1()) + super(TimeUUID, self).__init__(**kwargs) class 
Boolean(Column): db_type = 'boolean' diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 0c2b6c7da2..a94f87a093 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -4,7 +4,7 @@ from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.columns import Column +from cqlengine.columns import Column, TimeUUID from cqlengine.columns import Bytes from cqlengine.columns import Ascii from cqlengine.columns import Text @@ -63,5 +63,36 @@ def test_datetime_io(self): dt2 = self.DecimalTest.objects(test_id=0).first() assert dt2.dec_val == D('5') +class TestTimeUUID(BaseCassEngTestCase): + class TimeUUIDTest(Model): + test_id = Integer(primary_key=True) + timeuuid = TimeUUID() + + @classmethod + def setUpClass(cls): + super(TestTimeUUID, cls).setUpClass() + create_table(cls.TimeUUIDTest) + + @classmethod + def tearDownClass(cls): + super(TestTimeUUID, cls).tearDownClass() + delete_table(cls.TimeUUIDTest) + + def test_timeuuid_io(self): + t0 = self.TimeUUIDTest.create(test_id=0) + t1 = self.TimeUUIDTest.get(test_id=0) + + assert t1.timeuuid.time == t1.timeuuid.time + + + + + + + + + + + From d38a71ea788b004c78928ee4752c8ecc18e74bfb Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 5 Dec 2012 11:16:37 -0800 Subject: [PATCH 0075/3726] updating version# --- changelog | 3 +++ cqlengine/__init__.py | 2 +- setup.py | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/changelog b/changelog index d1d9469f68..ad4f5792c6 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,8 @@ CHANGELOG +0.0.6 +* added TimeUUID column + 0.0.5 * added connection pooling * adding a few convenience query classmethods to the model class diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index f2c0cbcb0a..6742297c6f 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,5 +1,5 @@ from cqlengine.columns import * from cqlengine.models import Model 
-__version__ = '0.0.5' +__version__ = '0.0.6' diff --git a/setup.py b/setup.py index 4471f9c807..f79eb17e51 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.0.5' +version = '0.0.6' long_desc = """ cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine From 119c0ac001f3f09c57de1d5a293f0f2e32020d88 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 5 Dec 2012 15:30:09 -0800 Subject: [PATCH 0076/3726] updating docs version --- docs/conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index e15b8f3ec1..fe149b8571 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.0.4' +version = '0.0.6' # The full version, including alpha/beta/rc tags. -release = '0.0.4-ALPHA' +release = '0.0.6' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. From 454f634b3410297ed1b2a7f67f46a044c3a3ee50 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 5 Dec 2012 16:43:58 -0800 Subject: [PATCH 0077/3726] updating setup docs link --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index f79eb17e51..927db4f509 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ long_desc = """ cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine -[Documentation](https://github.com/bdeggleston/cqlengine/wiki/Documentation) +[Documentation](https://cqlengine.readthedocs.org/en/latest/) [Report a Bug](https://github.com/bdeggleston/cqlengine/issues) @@ -17,7 +17,7 @@ [Dev Mailing List](https://groups.google.com/forum/?fromgroups#!forum/cqlengine-dev) -**NOTE: cqlengine is in alpha and under development, some features may change (hopefully with notice). 
Make sure to check the changelog and test your app before upgrading** +**NOTE: cqlengine is in alpha and under development, some features may change. Make sure to check the changelog and test your app before upgrading** """ setup( From e53a5031ae6ba8bf319e24a6617906d0c1067f65 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 7 Dec 2012 15:10:42 -0800 Subject: [PATCH 0078/3726] fixed manual table name bugs --- changelog | 4 +++ cqlengine/models.py | 33 +++++++++---------- .../tests/model/test_class_construction.py | 25 ++++++++++++++ docs/topics/models.rst | 2 +- 4 files changed, 45 insertions(+), 19 deletions(-) diff --git a/changelog b/changelog index ad4f5792c6..2546c9da0b 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,9 @@ CHANGELOG +0.0.7 +* fixed manual table name bug +* changed model level db_name field to table_name + 0.0.6 * added TimeUUID column diff --git a/cqlengine/models.py b/cqlengine/models.py index 6e75c9acc3..7891fe1325 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -17,7 +17,7 @@ class MultipleObjectsReturned(QueryException): pass #table names will be generated automatically from it's model and package name #however, you can alse define them manually here - db_name = None + table_name = None #the keyspace for this model keyspace = 'cqlengine' @@ -36,21 +36,21 @@ def column_family_name(cls, include_keyspace=True): Returns the column family name if it's been defined otherwise, it creates it from the module and class name """ - if cls.db_name: - return cls.db_name.lower() - - camelcase = re.compile(r'([a-z])([A-Z])') - ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s) - cf_name = '' - module = cls.__module__.split('.') - if module: - cf_name = ccase(module[-1]) + '_' - - cf_name += ccase(cls.__name__) - #trim to less than 48 characters or cassandra will complain - cf_name = cf_name[-48:] - cf_name = cf_name.lower() + if cls.table_name: + cf_name = cls.table_name.lower() + else: + 
camelcase = re.compile(r'([a-z])([A-Z])') + ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s) + + module = cls.__module__.split('.') + if module: + cf_name = ccase(module[-1]) + '_' + + cf_name += ccase(cls.__name__) + #trim to less than 48 characters or cassandra will complain + cf_name = cf_name[-48:] + cf_name = cf_name.lower() if not include_keyspace: return cf_name return '{}.{}'.format(cls.keyspace, cf_name) @@ -157,9 +157,6 @@ def _transform_column(col_name, col_obj): raise ModelDefinitionException( 'Indexes on models with multiple primary keys is not supported') - #get column family name - cf_name = attrs.pop('db_name', name) - #create db_name -> model name map for loading db_map = {} for field_name, col in column_dict.items(): diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index fd218e1a5c..d01011ec42 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -3,6 +3,7 @@ from cqlengine.exceptions import ModelException from cqlengine.models import Model from cqlengine import columns +import cqlengine class TestModelClassFunction(BaseCassEngTestCase): """ @@ -103,3 +104,27 @@ def test_meta_data_is_not_inherited(self): """ Test that metadata defined in one class, is not inherited by subclasses """ + +class TestManualTableNaming(BaseCassEngTestCase): + + class RenamedTest(cqlengine.Model): + keyspace = 'whatever' + table_name = 'manual_name' + + id = cqlengine.UUID(primary_key=True) + data = cqlengine.Text() + + def test_proper_table_naming(self): + assert self.RenamedTest.column_family_name(include_keyspace=False) == 'manual_name' + assert self.RenamedTest.column_family_name(include_keyspace=True) == 'whatever.manual_name' + + + + + + + + + + + diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 3ddefc37e4..86bd08d437 100644 --- a/docs/topics/models.rst +++ 
b/docs/topics/models.rst @@ -123,7 +123,7 @@ Model Methods Model Attributes ================ - .. attribute:: Model.db_name + .. attribute:: Model.table_name *Optional.* Sets the name of the CQL table for this model. If left blank, the table name will be the name of the model, with it's module name as it's prefix From eae926b6710cc8e74cc9a726f7db6a7628b31227 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 7 Dec 2012 15:13:00 -0800 Subject: [PATCH 0079/3726] changing version number --- cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- setup.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 6742297c6f..5a99e64c07 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,5 +1,5 @@ from cqlengine.columns import * from cqlengine.models import Model -__version__ = '0.0.6' +__version__ = '0.0.7' diff --git a/docs/conf.py b/docs/conf.py index fe149b8571..a45c245c9d 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.0.6' +version = '0.0.7' # The full version, including alpha/beta/rc tags. -release = '0.0.6' +release = '0.0.7' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/setup.py b/setup.py index 927db4f509..a9658c482f 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.0.6' +version = '0.0.7' long_desc = """ cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine From 6b202c7e76ad182c1a9290fb0c0cfdd999cdae89 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 11 Dec 2012 18:53:14 -0800 Subject: [PATCH 0080/3726] added ability to specifiy replication factor and replication strategy --- cqlengine/management.py | 11 +++++++---- cqlengine/models.py | 1 + cqlengine/tests/management/test_management.py | 10 ++++++++++ 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 481e30364d..4e764b55de 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -1,11 +1,11 @@ from cqlengine.connection import connection_manager -def create_keyspace(name): +def create_keyspace(name, strategy_class = 'SimpleStrategy', replication_factor=3): with connection_manager() as con: if name not in [k.name for k in con.con.client.describe_keyspaces()]: con.execute("""CREATE KEYSPACE {} - WITH strategy_class = 'SimpleStrategy' - AND strategy_options:replication_factor=1;""".format(name)) + WITH strategy_class = '{}' + AND strategy_options:replication_factor={};""".format(name, strategy_class, replication_factor)) def delete_keyspace(name): with connection_manager() as con: @@ -38,8 +38,11 @@ def add_column(col): add_column(col) qtypes.append('PRIMARY KEY ({})'.format(', '.join(pkeys))) - + qs += ['({})'.format(', '.join(qtypes))] + + # add read_repair_chance + qs += ["WITH read_repair_chance = {}".format(model.read_repair_chance)] qs = ' '.join(qs) con.execute(qs) diff --git a/cqlengine/models.py b/cqlengine/models.py index 7891fe1325..e8a270459a 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -21,6 +21,7 @@ class MultipleObjectsReturned(QueryException): 
pass #the keyspace for this model keyspace = 'cqlengine' + read_repair_chance = 0.1 def __init__(self, **values): self._values = {} diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index e491d615a0..f3862a9527 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -3,6 +3,7 @@ from cqlengine.connection import ConnectionPool from mock import Mock +from cqlengine import management class ConnectionPoolTestCase(BaseCassEngTestCase): @@ -35,3 +36,12 @@ def test_should_pop_connections_from_queue(self): self.assertEquals(1, ConnectionPool._queue.qsize()) self.assertEquals(conn, ConnectionPool.get()) self.assertEquals(0, ConnectionPool._queue.qsize()) + + +class CreateKeyspaceTest(BaseCassEngTestCase): + def test_create_succeeeds(self): + management.create_keyspace('test_keyspace') + management.delete_keyspace('test_keyspace') + + + \ No newline at end of file From fd347707b29d63977a5a37c934e6481c17c0e0f9 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 12 Dec 2012 11:58:02 -0800 Subject: [PATCH 0081/3726] updating version# and change log --- changelog | 4 ++++ cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- setup.py | 2 +- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/changelog b/changelog index 2546c9da0b..d8ee94baee 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,9 @@ CHANGELOG +0.0.8 +* added configurable read repair chance to model definitions +* added configurable keyspace strategy class and replication factor to keyspace creator + 0.0.7 * fixed manual table name bug * changed model level db_name field to table_name diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 5a99e64c07..6f020f7620 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,5 +1,5 @@ from cqlengine.columns import * from cqlengine.models import Model -__version__ = '0.0.7' +__version__ = '0.0.8' diff --git a/docs/conf.py 
b/docs/conf.py index a45c245c9d..2068f2641e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.0.7' +version = '0.0.8' # The full version, including alpha/beta/rc tags. -release = '0.0.7' +release = '0.0.8' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index a9658c482f..45619393e9 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.0.7' +version = '0.0.8' long_desc = """ cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine From 27e5069db961a23c85fe84e8a12aed9b2d36042f Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 12 Dec 2012 12:31:21 -0800 Subject: [PATCH 0082/3726] updated readme --- README.md | 61 ++++++++++++++++++++++++++++++------------------------- 1 file changed, 33 insertions(+), 28 deletions(-) diff --git a/README.md b/README.md index 4eceed1fb6..2a61f645af 100644 --- a/README.md +++ b/README.md @@ -22,43 +22,48 @@ pip install cqlengine ```python #first, define a model ->>> from cqlengine import columns ->>> from cqlengine.models import Model +from cqlengine import columns +from cqlengine.models import Model ->>> class ExampleModel(Model): ->>> example_id = columns.UUID(primary_key=True) ->>> example_type = columns.Integer(index=True) ->>> created_at = columns.DateTime() ->>> description = columns.Text(required=False) +class ExampleModel(Model): + read_repair_chance = 0.05 # optional - defaults to 0.1 + + example_id = columns.UUID(primary_key=True) + example_type = columns.Integer(index=True) + created_at = columns.DateTime() + description = columns.Text(required=False) #next, setup the connection to your cassandra server(s)... 
->>> from cqlengine import connection ->>> connection.setup(['127.0.0.1:9160']) +from cqlengine import connection +connection.setup(['127.0.0.1:9160']) #...and create your CQL table ->>> from cqlengine.management import create_table ->>> create_table(ExampleModel) +from cqlengine.management import create_table +create_table(ExampleModel) #now we can create some rows: ->>> em1 = ExampleModel.create(example_type=0, description="example1") ->>> em2 = ExampleModel.create(example_type=0, description="example2") ->>> em3 = ExampleModel.create(example_type=0, description="example3") ->>> em4 = ExampleModel.create(example_type=0, description="example4") ->>> em5 = ExampleModel.create(example_type=1, description="example5") ->>> em6 = ExampleModel.create(example_type=1, description="example6") ->>> em7 = ExampleModel.create(example_type=1, description="example7") ->>> em8 = ExampleModel.create(example_type=1, description="example8") +em1 = ExampleModel.create(example_type=0, description="example1") +em2 = ExampleModel.create(example_type=0, description="example2") +em3 = ExampleModel.create(example_type=0, description="example3") +em4 = ExampleModel.create(example_type=0, description="example4") +em5 = ExampleModel.create(example_type=1, description="example5") +em6 = ExampleModel.create(example_type=1, description="example6") +em7 = ExampleModel.create(example_type=1, description="example7") +em8 = ExampleModel.create(example_type=1, description="example8") # Note: the UUID and DateTime columns will create uuid4 and datetime.now # values automatically if we don't specify them when creating new rows +# alternative syntax for creating new objects +ExampleModel(example_type=0, description="example9").save() + #and now we can run some queries against our table ->>> ExampleModel.objects.count() +ExampleModel.objects.count() 8 ->>> q = ExampleModel.objects(example_type=1) ->>> q.count() +q = ExampleModel.objects(example_type=1) +q.count() 4 ->>> for instance in q: ->>> print 
q.description +for instance in q: + print q.description example5 example6 example7 @@ -67,11 +72,11 @@ example8 #here we are applying additional filtering to an existing query #query objects are immutable, so calling filter returns a new #query object ->>> q2 = q.filter(example_id=em5.example_id) +q2 = q.filter(example_id=em5.example_id) ->>> q2.count() +q2.count() 1 ->>> for instance in q2: ->>> print q.description +for instance in q2: + print q.description example5 ``` From 3c5b7df847130366fae1c99f0f401a2840531720 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 12 Dec 2012 12:35:26 -0800 Subject: [PATCH 0083/3726] readme improvements --- README.md | 48 ++++++++++++++++++++++-------------------------- 1 file changed, 22 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index 2a61f645af..d70353999a 100644 --- a/README.md +++ b/README.md @@ -26,44 +26,40 @@ from cqlengine import columns from cqlengine.models import Model class ExampleModel(Model): - read_repair_chance = 0.05 # optional - defaults to 0.1 - + read_repair_chance = 0.05 # optional - defaults to 0.1 example_id = columns.UUID(primary_key=True) example_type = columns.Integer(index=True) created_at = columns.DateTime() description = columns.Text(required=False) #next, setup the connection to your cassandra server(s)... 
-from cqlengine import connection -connection.setup(['127.0.0.1:9160']) +>>> from cqlengine import connection +>>> connection.setup(['127.0.0.1:9160']) #...and create your CQL table -from cqlengine.management import create_table -create_table(ExampleModel) +>>> from cqlengine.management import create_table +>>> create_table(ExampleModel) #now we can create some rows: -em1 = ExampleModel.create(example_type=0, description="example1") -em2 = ExampleModel.create(example_type=0, description="example2") -em3 = ExampleModel.create(example_type=0, description="example3") -em4 = ExampleModel.create(example_type=0, description="example4") -em5 = ExampleModel.create(example_type=1, description="example5") -em6 = ExampleModel.create(example_type=1, description="example6") -em7 = ExampleModel.create(example_type=1, description="example7") -em8 = ExampleModel.create(example_type=1, description="example8") +>>> em1 = ExampleModel.create(example_type=0, description="example1") +>>> em2 = ExampleModel.create(example_type=0, description="example2") +>>> em3 = ExampleModel.create(example_type=0, description="example3") +>>> em4 = ExampleModel.create(example_type=0, description="example4") +>>> em5 = ExampleModel.create(example_type=1, description="example5") +>>> em6 = ExampleModel.create(example_type=1, description="example6") +>>> em7 = ExampleModel.create(example_type=1, description="example7") +>>> em8 = ExampleModel.create(example_type=1, description="example8") # Note: the UUID and DateTime columns will create uuid4 and datetime.now # values automatically if we don't specify them when creating new rows -# alternative syntax for creating new objects -ExampleModel(example_type=0, description="example9").save() - #and now we can run some queries against our table -ExampleModel.objects.count() +>>> ExampleModel.objects.count() 8 -q = ExampleModel.objects(example_type=1) -q.count() +>>> q = ExampleModel.objects(example_type=1) +>>> q.count() 4 -for instance in q: - print 
q.description +>>> for instance in q: +>>> print q.description example5 example6 example7 @@ -72,11 +68,11 @@ example8 #here we are applying additional filtering to an existing query #query objects are immutable, so calling filter returns a new #query object -q2 = q.filter(example_id=em5.example_id) +>>> q2 = q.filter(example_id=em5.example_id) -q2.count() +>>> q2.count() 1 -for instance in q2: - print q.description +>>> for instance in q2: +>>> print q.description example5 ``` From b1c9ad6f8e55876b776099b240f1872a3f26a551 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 13 Dec 2012 09:32:52 -0800 Subject: [PATCH 0084/3726] updating docs/readme --- README.md | 4 +--- docs/index.rst | 58 +++++++++++++++++++++++++------------------------- 2 files changed, 30 insertions(+), 32 deletions(-) diff --git a/README.md b/README.md index 4eceed1fb6..56422d9fce 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ cqlengine =============== -cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine +cqlengine is a Cassandra CQL 3 ORM for Python with an interface similar to the Django orm and mongoengine [Documentation](https://cqlengine.readthedocs.org/en/latest/) @@ -11,8 +11,6 @@ cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and m [Dev Mailing List](https://groups.google.com/forum/?fromgroups#!forum/cqlengine-dev) -**NOTE: cqlengine is in alpha and under development, some features may change (hopefully with notice). 
Make sure to check the changelog and test your app before upgrading** - ## Installation ``` pip install cqlengine diff --git a/docs/index.rst b/docs/index.rst index 2559a1006e..c556dfd12b 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -5,7 +5,7 @@ cqlengine documentation ======================= -cqlengine is a Cassandra CQL ORM for Python with an interface similar to the Django orm and mongoengine +cqlengine is a Cassandra CQL 3 ORM for Python with an interface similar to the Django orm and mongoengine :ref:`getting-started` @@ -28,43 +28,43 @@ Getting Started .. code-block:: python #first, define a model - >>> from cqlengine import columns - >>> from cqlengine import Model + from cqlengine import columns + from cqlengine import Model - >>> class ExampleModel(Model): - >>> example_id = columns.UUID(primary_key=True) - >>> example_type = columns.Integer(index=True) - >>> created_at = columns.DateTime() - >>> description = columns.Text(required=False) + class ExampleModel(Model): + example_id = columns.UUID(primary_key=True) + example_type = columns.Integer(index=True) + created_at = columns.DateTime() + description = columns.Text(required=False) #next, setup the connection to your cassandra server(s)... 
- >>> from cqlengine import connection - >>> connection.setup(['127.0.0.1:9160']) + from cqlengine import connection + connection.setup(['127.0.0.1:9160']) #...and create your CQL table - >>> from cqlengine.management import create_table - >>> create_table(ExampleModel) + from cqlengine.management import create_table + create_table(ExampleModel) #now we can create some rows: - >>> em1 = ExampleModel.create(example_type=0, description="example1") - >>> em2 = ExampleModel.create(example_type=0, description="example2") - >>> em3 = ExampleModel.create(example_type=0, description="example3") - >>> em4 = ExampleModel.create(example_type=0, description="example4") - >>> em5 = ExampleModel.create(example_type=1, description="example5") - >>> em6 = ExampleModel.create(example_type=1, description="example6") - >>> em7 = ExampleModel.create(example_type=1, description="example7") - >>> em8 = ExampleModel.create(example_type=1, description="example8") + em1 = ExampleModel.create(example_type=0, description="example1") + em2 = ExampleModel.create(example_type=0, description="example2") + em3 = ExampleModel.create(example_type=0, description="example3") + em4 = ExampleModel.create(example_type=0, description="example4") + em5 = ExampleModel.create(example_type=1, description="example5") + em6 = ExampleModel.create(example_type=1, description="example6") + em7 = ExampleModel.create(example_type=1, description="example7") + em8 = ExampleModel.create(example_type=1, description="example8") # Note: the UUID and DateTime columns will create uuid4 and datetime.now # values automatically if we don't specify them when creating new rows #and now we can run some queries against our table - >>> ExampleModel.objects.count() + ExampleModel.objects.count() 8 - >>> q = ExampleModel.objects(example_type=1) - >>> q.count() + q = ExampleModel.objects(example_type=1) + q.count() 4 - >>> for instance in q: - >>> print q.description + for instance in q: + print q.description example5 example6 
example7 @@ -73,12 +73,12 @@ Getting Started #here we are applying additional filtering to an existing query #query objects are immutable, so calling filter returns a new #query object - >>> q2 = q.filter(example_id=em5.example_id) + q2 = q.filter(example_id=em5.example_id) - >>> q2.count() + q2.count() 1 - >>> for instance in q2: - >>> print q.description + for instance in q2: + print q.description example5 From 96a213b4b142af07e055a2d280c47a0a141f0e27 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 13 Dec 2012 09:38:35 -0800 Subject: [PATCH 0085/3726] editing doc index --- docs/index.rst | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index c556dfd12b..44b5b9b9fe 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -38,33 +38,33 @@ Getting Started description = columns.Text(required=False) #next, setup the connection to your cassandra server(s)... - from cqlengine import connection - connection.setup(['127.0.0.1:9160']) + >>> from cqlengine import connection + >>> connection.setup(['127.0.0.1:9160']) #...and create your CQL table - from cqlengine.management import create_table - create_table(ExampleModel) + >>> from cqlengine.management import create_table + >>> create_table(ExampleModel) #now we can create some rows: - em1 = ExampleModel.create(example_type=0, description="example1") - em2 = ExampleModel.create(example_type=0, description="example2") - em3 = ExampleModel.create(example_type=0, description="example3") - em4 = ExampleModel.create(example_type=0, description="example4") - em5 = ExampleModel.create(example_type=1, description="example5") - em6 = ExampleModel.create(example_type=1, description="example6") - em7 = ExampleModel.create(example_type=1, description="example7") - em8 = ExampleModel.create(example_type=1, description="example8") + >>> em1 = ExampleModel.create(example_type=0, description="example1") + >>> em2 = 
ExampleModel.create(example_type=0, description="example2") + >>> em3 = ExampleModel.create(example_type=0, description="example3") + >>> em4 = ExampleModel.create(example_type=0, description="example4") + >>> em5 = ExampleModel.create(example_type=1, description="example5") + >>> em6 = ExampleModel.create(example_type=1, description="example6") + >>> em7 = ExampleModel.create(example_type=1, description="example7") + >>> em8 = ExampleModel.create(example_type=1, description="example8") # Note: the UUID and DateTime columns will create uuid4 and datetime.now # values automatically if we don't specify them when creating new rows #and now we can run some queries against our table - ExampleModel.objects.count() + >>> ExampleModel.objects.count() 8 - q = ExampleModel.objects(example_type=1) - q.count() + >>> q = ExampleModel.objects(example_type=1) + >>> q.count() 4 - for instance in q: - print q.description + >>> for instance in q: + >>> print q.description example5 example6 example7 @@ -73,12 +73,12 @@ Getting Started #here we are applying additional filtering to an existing query #query objects are immutable, so calling filter returns a new #query object - q2 = q.filter(example_id=em5.example_id) + >>> q2 = q.filter(example_id=em5.example_id) - q2.count() + >>> q2.count() 1 - for instance in q2: - print q.description + >>> for instance in q2: + >>> print q.description example5 From 585c75a5bb4e0d0d482b9805c7dd40da715dfe7a Mon Sep 17 00:00:00 2001 From: Eric Scrivner Date: Thu, 13 Dec 2012 20:58:20 -0800 Subject: [PATCH 0086/3726] Fix Vagrant Box URL --- Vagrantfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index 6d8ccc8ad1..191c5e8cdc 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -11,7 +11,7 @@ Vagrant::Config.run do |config| # The url from where the 'config.vm.box' box will be fetched if it # doesn't already exist on the user's system. 
- config.vm.box_url = "https://s3-us-west-2.amazonaws.com/graphplatform.swmirror/precise64.box" + config.vm.box_url = "http://files.vagrantup.com/precise64.box" # Boot with a GUI so you can see the screen. (Default is headless) # config.vm.boot_mode = :gui @@ -43,4 +43,4 @@ Vagrant::Config.run do |config| # puppet.manifest_file = "site.pp" puppet.module_path = "modules" end -end \ No newline at end of file +end From dc902163455f36c8ba34337373b24814829b028e Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 22 Dec 2012 21:41:09 -0800 Subject: [PATCH 0087/3726] adding IDEA project files --- .gitignore | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.gitignore b/.gitignore index bd169dfade..16029f08ec 100644 --- a/.gitignore +++ b/.gitignore @@ -23,6 +23,11 @@ html/ .project .pydevproject .settings/ +.idea/ +*.iml + +.DS_Store + # Unit test / coverage reports .coverage From cd4f9afbed749dd735880785b2061e93657032e9 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 22 Dec 2012 21:41:47 -0800 Subject: [PATCH 0088/3726] fixing column inheritance and short circuiting table name inheritance --- cqlengine/models.py | 22 ++++++++++++++++--- .../tests/model/test_class_construction.py | 17 ++++++++++++++ 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index e8a270459a..255b7ce808 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -16,8 +16,8 @@ class DoesNotExist(QueryException): pass class MultipleObjectsReturned(QueryException): pass #table names will be generated automatically from it's model and package name - #however, you can alse define them manually here - table_name = None + #however, you can also define them manually here + table_name = None #the keyspace for this model keyspace = 'cqlengine' @@ -112,6 +112,12 @@ def __new__(cls, name, bases, attrs): primary_keys = OrderedDict() pk_name = None + #get inherited properties + inherited_columns = OrderedDict() + for base in bases: + 
for k,v in getattr(base, '_defined_columns', {}).items(): + inherited_columns.setdefault(k,v) + def _transform_column(col_name, col_obj): column_dict[col_name] = col_obj if col_obj.primary_key: @@ -129,7 +135,13 @@ def _transform_column(col_name, col_obj): column_definitions = [(k,v) for k,v in attrs.items() if isinstance(v, columns.Column)] column_definitions = sorted(column_definitions, lambda x,y: cmp(x[1].position, y[1].position)) - #prepend primary key if none has been defined + column_definitions = inherited_columns.items() + column_definitions + + #columns defined on model, excludes automatically + #defined columns + defined_columns = OrderedDict(column_definitions) + + #prepend primary key if one hasn't been defined if not any([v.primary_key for k,v in column_definitions]): k,v = 'id', columns.UUID(primary_key=True) column_definitions = [(k,v)] + column_definitions @@ -163,9 +175,13 @@ def _transform_column(col_name, col_obj): for field_name, col in column_dict.items(): db_map[col.db_field_name] = field_name + #short circuit table_name inheritance + attrs['table_name'] = attrs.get('table_name') + #add management members to the class attrs['_columns'] = column_dict attrs['_primary_keys'] = primary_keys + attrs['_defined_columns'] = defined_columns attrs['_db_map'] = db_map attrs['_pk_name'] = pk_name attrs['_dynamic_columns'] = {} diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index d01011ec42..77b1d73775 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -80,6 +80,19 @@ class Stuff(Model): self.assertEquals(inst1.num, 5) self.assertEquals(inst2.num, 7) + def test_superclass_fields_are_inherited(self): + """ + Tests that fields defined on the super class are inherited properly + """ + class TestModel(Model): + text = columns.Text() + + class InheritedModel(TestModel): + numbers = columns.Integer() + + assert 'text' in 
InheritedModel._columns + assert 'numbers' in InheritedModel._columns + def test_normal_fields_can_be_defined_between_primary_keys(self): """ Tests tha non primary key fields can be defined between primary key fields @@ -118,6 +131,10 @@ def test_proper_table_naming(self): assert self.RenamedTest.column_family_name(include_keyspace=False) == 'manual_name' assert self.RenamedTest.column_family_name(include_keyspace=True) == 'whatever.manual_name' + def test_manual_table_name_is_not_inherited(self): + class InheritedTest(self.RenamedTest): pass + assert InheritedTest.table_name is None + From 3e1f96c4f0d77fdadb81bb1d7578b1aa36546a98 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 22 Dec 2012 21:45:42 -0800 Subject: [PATCH 0089/3726] adding not about short circuited table_name inheritance --- docs/topics/models.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 86bd08d437..86788a2530 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -125,7 +125,7 @@ Model Attributes .. attribute:: Model.table_name - *Optional.* Sets the name of the CQL table for this model. If left blank, the table name will be the name of the model, with it's module name as it's prefix + *Optional.* Sets the name of the CQL table for this model. If left blank, the table name will be the name of the model, with it's module name as it's prefix. Manually defined table names are not inherited. .. 
attribute:: Model.keyspace From 31633bb1a46f3d26b1d9390fefc9232de49f9b6e Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 22 Dec 2012 22:06:06 -0800 Subject: [PATCH 0090/3726] updating version number and change log --- changelog | 4 ++++ cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- setup.py | 2 +- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/changelog b/changelog index d8ee94baee..a724be7dfa 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,9 @@ CHANGELOG +0.0.9 +* fixed column inheritance bug +* manually defined table names are no longer inherited by subclasses + 0.0.8 * added configurable read repair chance to model definitions * added configurable keyspace strategy class and replication factor to keyspace creator diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 6f020f7620..a3f227db2b 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,5 +1,5 @@ from cqlengine.columns import * from cqlengine.models import Model -__version__ = '0.0.8' +__version__ = '0.0.9' diff --git a/docs/conf.py b/docs/conf.py index 2068f2641e..d4defc9b6e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.0.8' +version = '0.0.9' # The full version, including alpha/beta/rc tags. -release = '0.0.8' +release = '0.0.9' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/setup.py b/setup.py index 45619393e9..470df3540c 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.0.8' +version = '0.0.9' long_desc = """ cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine From e7d4f9bf9f260e50f70045da55a13e60c06a1c67 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 6 Jan 2013 09:31:37 -0800 Subject: [PATCH 0091/3726] Changing base exception from BaseException to Exception --- cqlengine/exceptions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/exceptions.py b/cqlengine/exceptions.py index 0b1ff55f1e..87b5cdc4c5 100644 --- a/cqlengine/exceptions.py +++ b/cqlengine/exceptions.py @@ -1,5 +1,5 @@ #cqlengine exceptions -class CQLEngineException(BaseException): pass +class CQLEngineException(Exception): pass class ModelException(CQLEngineException): pass class ValidationError(CQLEngineException): pass From 79900278450364f8d064dcaaa7ac712a349c95bf Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 6 Jan 2013 09:37:20 -0800 Subject: [PATCH 0092/3726] fixed bug where default values that evaluate to false were ignored --- cqlengine/columns.py | 2 +- cqlengine/tests/columns/test_validation.py | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index b276b5d75b..df01bef72e 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -97,7 +97,7 @@ def to_database(self, value): @property def has_default(self): - return bool(self.default) + return self.default is not None @property def is_primary_key(self): diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index a94f87a093..00a4097b18 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -84,6 +84,15 @@ def test_timeuuid_io(self): assert t1.timeuuid.time == 
t1.timeuuid.time +class TestInteger(BaseCassEngTestCase): + class IntegerTest(Model): + test_id = UUID(primary_key=True) + value = Integer(default=0) + + def test_default_zero_fields_validate(self): + """ Tests that integer columns with a default value of 0 validate """ + it = self.IntegerTest() + it.validate() From e529f33dbed84f5efd74dffc8e0192e95aa08afe Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 6 Jan 2013 10:13:04 -0800 Subject: [PATCH 0093/3726] adding magic methods for model instance equality comparisons --- cqlengine/models.py | 6 +++ .../tests/model/test_equality_operations.py | 52 +++++++++++++++++++ cqlengine/tests/model/test_model_io.py | 1 - 3 files changed, 58 insertions(+), 1 deletion(-) create mode 100644 cqlengine/tests/model/test_equality_operations.py diff --git a/cqlengine/models.py b/cqlengine/models.py index 255b7ce808..bf9d157aca 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -31,6 +31,12 @@ def __init__(self, **values): value_mngr = column.value_manager(self, column, value) self._values[name] = value_mngr + def __eq__(self, other): + return self.as_dict() == other.as_dict() + + def __ne__(self, other): + return not self.__eq__(other) + @classmethod def column_family_name(cls, include_keyspace=True): """ diff --git a/cqlengine/tests/model/test_equality_operations.py b/cqlengine/tests/model/test_equality_operations.py new file mode 100644 index 0000000000..4d5b95219b --- /dev/null +++ b/cqlengine/tests/model/test_equality_operations.py @@ -0,0 +1,52 @@ +from unittest import skip +from cqlengine.tests.base import BaseCassEngTestCase + +from cqlengine.management import create_table +from cqlengine.management import delete_table +from cqlengine.models import Model +from cqlengine import columns + +class TestModel(Model): + count = columns.Integer() + text = columns.Text(required=False) + +class TestEqualityOperators(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestEqualityOperators, 
cls).setUpClass() + create_table(TestModel) + + def setUp(self): + super(TestEqualityOperators, self).setUp() + self.t0 = TestModel.create(count=5, text='words') + self.t1 = TestModel.create(count=5, text='words') + + @classmethod + def tearDownClass(cls): + super(TestEqualityOperators, cls).tearDownClass() + delete_table(TestModel) + + def test_an_instance_evaluates_as_equal_to_itself(self): + """ + """ + assert self.t0 == self.t0 + + def test_two_instances_referencing_the_same_rows_and_different_values_evaluate_not_equal(self): + """ + """ + t0 = TestModel.get(id=self.t0.id) + t0.text = 'bleh' + assert t0 != self.t0 + + def test_two_instances_referencing_the_same_rows_and_values_evaluate_equal(self): + """ + """ + t0 = TestModel.get(id=self.t0.id) + assert t0 == self.t0 + + def test_two_instances_referencing_different_rows_evaluate_to_not_equal(self): + """ + """ + assert self.t0 != self.t1 + diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index f65b44efd2..73eb1e2496 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -4,7 +4,6 @@ from cqlengine.management import create_table from cqlengine.management import delete_table from cqlengine.models import Model -from cqlengine.models import Model from cqlengine import columns class TestModel(Model): From 16e4a6293288d272c0e8d02f9ae6e682bebca761 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 6 Jan 2013 10:28:07 -0800 Subject: [PATCH 0094/3726] adding min & max length validation to the Text column type --- cqlengine/columns.py | 17 +++++++++ cqlengine/tests/columns/test_validation.py | 43 ++++++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index df01bef72e..2f6ee4f4ac 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -146,6 +146,23 @@ class Ascii(Column): class Text(Column): db_type = 'text' + def __init__(self, *args, **kwargs): + 
self.min_length = kwargs.pop('min_length', 1 if kwargs.get('required', True) else None) + self.max_length = kwargs.pop('max_length', None) + super(Text, self).__init__(*args, **kwargs) + + def validate(self, value): + value = super(Text, self).validate(value) + if not isinstance(value, (basestring, bytearray)) and value is not None: + raise ValidationError('{} is not a string'.format(type(value))) + if self.max_length: + if len(value) > self.max_length: + raise ValidationError('{} is longer than {} characters'.format(self.column_name, self.max_length)) + if self.min_length: + if len(value) < self.min_length: + raise ValidationError('{} is shorter than {} characters'.format(self.column_name, self.min_length)) + return value + class Integer(Column): db_type = 'int' diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 00a4097b18..4921b67256 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -1,6 +1,7 @@ #tests the behavior of the column classes from datetime import datetime from decimal import Decimal as D +from cqlengine import ValidationError from cqlengine.tests.base import BaseCassEngTestCase @@ -94,6 +95,48 @@ def test_default_zero_fields_validate(self): it = self.IntegerTest() it.validate() +class TestText(BaseCassEngTestCase): + + def test_min_length(self): + #min len defaults to 1 + col = Text() + + with self.assertRaises(ValidationError): + col.validate('') + + col.validate('b') + + #test not required defaults to 0 + Text(required=False).validate('') + + #test arbitrary lengths + Text(min_length=0).validate('') + Text(min_length=5).validate('blake') + Text(min_length=5).validate('blaketastic') + with self.assertRaises(ValidationError): + Text(min_length=6).validate('blake') + + def test_max_length(self): + + Text(max_length=5).validate('blake') + with self.assertRaises(ValidationError): + Text(max_length=5).validate('blaketastic') + + def 
test_type_checking(self): + Text().validate('string') + Text().validate(u'unicode') + Text().validate(bytearray('bytearray')) + + with self.assertRaises(ValidationError): + Text().validate(None) + + with self.assertRaises(ValidationError): + Text().validate(5) + + with self.assertRaises(ValidationError): + Text().validate(True) + + From d0ac0cfcd99cb2619401e886c5fed7f058edfa5f Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 6 Jan 2013 10:38:10 -0800 Subject: [PATCH 0095/3726] updating version number, documentation, and changelog --- changelog | 5 +++++ cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- docs/topics/columns.rst | 8 ++++++++ setup.py | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) diff --git a/changelog b/changelog index a724be7dfa..8763c4ad70 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,10 @@ CHANGELOG +0.1 +* added min_length and max_length validators to the Text column +* added == and != equality operators to model class +* fixed bug with default values that would evaluate to False (ie: 0, '') + 0.0.9 * fixed column inheritance bug * manually defined table names are no longer inherited by subclasses diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index a3f227db2b..33cf4f1397 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,5 +1,5 @@ from cqlengine.columns import * from cqlengine.models import Model -__version__ = '0.0.9' +__version__ = '0.1' diff --git a/docs/conf.py b/docs/conf.py index d4defc9b6e..f2d45ab7a1 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.0.9' +version = '0.1' # The full version, including alpha/beta/rc tags. -release = '0.0.9' +release = '0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/docs/topics/columns.rst b/docs/topics/columns.rst index dc87f8cbdf..9090b0bc0f 100644 --- a/docs/topics/columns.rst +++ b/docs/topics/columns.rst @@ -24,6 +24,14 @@ Columns columns.Text() + **options** + + :attr:`~columns.Text.min_length` + Sets the minimum length of this string. If this field is not set , and the column is not a required field, it defaults to 0, otherwise 1. + + :attr:`~columns.Text.max_length` + Sets the maximum length of this string. Defaults to None + .. class:: Integer() Stores an integer value :: diff --git a/setup.py b/setup.py index 470df3540c..65c4806c53 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.0.9' +version = '0.1' long_desc = """ cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine From 9386d55513ae9233e2281c8f0ffbabf37ce656f9 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 8 Feb 2013 07:04:49 -0800 Subject: [PATCH 0096/3726] fixed a bug in table auto naming, which would fail if the generated name started with an _ --- cqlengine/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cqlengine/models.py b/cqlengine/models.py index 255b7ce808..f979a84e48 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -52,6 +52,7 @@ def column_family_name(cls, include_keyspace=True): #trim to less than 48 characters or cassandra will complain cf_name = cf_name[-48:] cf_name = cf_name.lower() + cf_name = re.sub(r'^_+', '', cf_name) if not include_keyspace: return cf_name return '{}.{}'.format(cls.keyspace, cf_name) From 2aa17094e3af71bb3cad7b3508f703376973e3d4 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 8 Feb 2013 07:28:19 -0800 Subject: [PATCH 0097/3726] adding check for existing indexes --- cqlengine/management.py | 9 +++++++-- cqlengine/tests/model/test_model_io.py | 6 ++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/cqlengine/management.py 
b/cqlengine/management.py index 4e764b55de..7031b8585c 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -47,11 +47,16 @@ def add_column(col): con.execute(qs) + #get existing index names + ks_info = con.con.client.describe_keyspace(model.keyspace) + cf_def = [cf for cf in ks_info.cf_defs if cf.name == raw_cf_name][0] + idx_names = [i.index_name for i in cf_def.column_metadata] + idx_names = filter(None, idx_names) + indexes = [c for n,c in model._columns.items() if c.index] if indexes: for column in indexes: - #TODO: check for existing index... - #can that be determined from the connection client? + if column.db_index_name in idx_names: continue qs = ['CREATE INDEX {}'.format(column.db_index_name)] qs += ['ON {}'.format(cf_name)] qs += ['({})'.format(column.db_field_name)] diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index f65b44efd2..61e9e73996 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -66,3 +66,9 @@ def test_column_deleting_works_properly(self): assert tm2._values['text'].initial_value is None + def test_a_sensical_error_is_raised_if_you_try_to_create_a_table_twice(self): + """ + """ + create_table(TestModel) + create_table(TestModel) + From b8ba592ede1d59a2ee5b3d9ef213c8eb7b5ab6f8 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 8 Feb 2013 07:32:39 -0800 Subject: [PATCH 0098/3726] version number bump --- changelog | 3 +++ cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- setup.py | 2 +- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/changelog b/changelog index 8763c4ad70..153d5617cd 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,8 @@ CHANGELOG +0.1.1 +* fixed a bug occurring when creating tables from the REPL + 0.1 * added min_length and max_length validators to the Text column * added == and != equality operators to model class diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 
33cf4f1397..cb73106c62 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,5 +1,5 @@ from cqlengine.columns import * from cqlengine.models import Model -__version__ = '0.1' +__version__ = '0.1.1' diff --git a/docs/conf.py b/docs/conf.py index f2d45ab7a1..d25ee3a0bb 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.1' +version = '0.1.1' # The full version, including alpha/beta/rc tags. -release = '0.1' +release = '0.1.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index 65c4806c53..f1878a24b3 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.1' +version = '0.1.1' long_desc = """ cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine From ba53c61b7b66cac2340b845e7cccdfa6911ce805 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 8 Feb 2013 09:11:29 -0800 Subject: [PATCH 0099/3726] fixing potential bug with column family discovery --- cqlengine/management.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 7031b8585c..65d6b0e2e3 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -49,8 +49,8 @@ def add_column(col): #get existing index names ks_info = con.con.client.describe_keyspace(model.keyspace) - cf_def = [cf for cf in ks_info.cf_defs if cf.name == raw_cf_name][0] - idx_names = [i.index_name for i in cf_def.column_metadata] + cf_defs = [cf for cf in ks_info.cf_defs if cf.name == raw_cf_name] + idx_names = [i.index_name for i in cf_defs[0].column_metadata] if cf_defs else [] idx_names = filter(None, idx_names) indexes = [c for n,c in model._columns.items() if c.index] From 7467f868976f61785229e0461b4a147b1a30d26c Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 
8 Feb 2013 09:12:12 -0800 Subject: [PATCH 0100/3726] version number bump --- cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- setup.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index cb73106c62..1f8c70ad36 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,5 +1,5 @@ from cqlengine.columns import * from cqlengine.models import Model -__version__ = '0.1.1' +__version__ = '0.1.2' diff --git a/docs/conf.py b/docs/conf.py index d25ee3a0bb..59c9a1239b 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.1.1' +version = '0.1.2' # The full version, including alpha/beta/rc tags. -release = '0.1.1' +release = '0.1.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index f1878a24b3..4190fb384f 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.1.1' +version = '0.1.2' long_desc = """ cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine From b76244a2f4acffe6b2e0d5bab5f21f21a5111588 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 8 Feb 2013 09:44:13 -0800 Subject: [PATCH 0101/3726] updating docs --- README.md | 16 ++++++++-------- docs/index.rst | 16 ++++++++-------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index 8af88071ca..3e5b06bfa6 100644 --- a/README.md +++ b/README.md @@ -39,14 +39,14 @@ class ExampleModel(Model): >>> create_table(ExampleModel) #now we can create some rows: ->>> em1 = ExampleModel.create(example_type=0, description="example1") ->>> em2 = ExampleModel.create(example_type=0, description="example2") ->>> em3 = ExampleModel.create(example_type=0, description="example3") ->>> em4 = ExampleModel.create(example_type=0, description="example4") 
->>> em5 = ExampleModel.create(example_type=1, description="example5") ->>> em6 = ExampleModel.create(example_type=1, description="example6") ->>> em7 = ExampleModel.create(example_type=1, description="example7") ->>> em8 = ExampleModel.create(example_type=1, description="example8") +>>> em1 = ExampleModel.create(example_type=0, description="example1", created_at=datetime.now()) +>>> em2 = ExampleModel.create(example_type=0, description="example2", created_at=datetime.now()) +>>> em3 = ExampleModel.create(example_type=0, description="example3", created_at=datetime.now()) +>>> em4 = ExampleModel.create(example_type=0, description="example4", created_at=datetime.now()) +>>> em5 = ExampleModel.create(example_type=1, description="example5", created_at=datetime.now()) +>>> em6 = ExampleModel.create(example_type=1, description="example6", created_at=datetime.now()) +>>> em7 = ExampleModel.create(example_type=1, description="example7", created_at=datetime.now()) +>>> em8 = ExampleModel.create(example_type=1, description="example8", created_at=datetime.now()) # Note: the UUID and DateTime columns will create uuid4 and datetime.now # values automatically if we don't specify them when creating new rows diff --git a/docs/index.rst b/docs/index.rst index 44b5b9b9fe..dbc64e54f9 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -46,14 +46,14 @@ Getting Started >>> create_table(ExampleModel) #now we can create some rows: - >>> em1 = ExampleModel.create(example_type=0, description="example1") - >>> em2 = ExampleModel.create(example_type=0, description="example2") - >>> em3 = ExampleModel.create(example_type=0, description="example3") - >>> em4 = ExampleModel.create(example_type=0, description="example4") - >>> em5 = ExampleModel.create(example_type=1, description="example5") - >>> em6 = ExampleModel.create(example_type=1, description="example6") - >>> em7 = ExampleModel.create(example_type=1, description="example7") - >>> em8 = ExampleModel.create(example_type=1, 
description="example8") + >>> em1 = ExampleModel.create(example_type=0, description="example1", created_at=datetime.now()) + >>> em2 = ExampleModel.create(example_type=0, description="example2", created_at=datetime.now()) + >>> em3 = ExampleModel.create(example_type=0, description="example3", created_at=datetime.now()) + >>> em4 = ExampleModel.create(example_type=0, description="example4", created_at=datetime.now()) + >>> em5 = ExampleModel.create(example_type=1, description="example5", created_at=datetime.now()) + >>> em6 = ExampleModel.create(example_type=1, description="example6", created_at=datetime.now()) + >>> em7 = ExampleModel.create(example_type=1, description="example7", created_at=datetime.now()) + >>> em8 = ExampleModel.create(example_type=1, description="example8", created_at=datetime.now()) # Note: the UUID and DateTime columns will create uuid4 and datetime.now # values automatically if we don't specify them when creating new rows From 2d6aab505b6a387ede22b3f80da028306d8ed631 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 23 Feb 2013 10:01:54 -0800 Subject: [PATCH 0102/3726] adding support for 1.2 style keyspace creation --- cqlengine/connection.py | 2 ++ cqlengine/management.py | 46 ++++++++++++++++++++++++++++++++++------- 2 files changed, 41 insertions(+), 7 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 8e1f70709d..52bc5cdef1 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -165,6 +165,8 @@ def execute(self, query, params={}): self.cur = self.con.cursor() self.cur.execute(query, params) return self.cur + except cql.ProgrammingError as ex: + raise CQLEngineException(unicode(ex)) except TTransportException: #TODO: check for other errors raised in the event of a connection / server problem #move to the next connection and set the connection pool diff --git a/cqlengine/management.py b/cqlengine/management.py index 65d6b0e2e3..7b4e52abe2 100644 --- a/cqlengine/management.py +++ 
b/cqlengine/management.py @@ -1,11 +1,43 @@ +import json + from cqlengine.connection import connection_manager +from cqlengine.exceptions import CQLEngineException + +def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, durable_writes=True, **replication_values): + """ + creates a keyspace -def create_keyspace(name, strategy_class = 'SimpleStrategy', replication_factor=3): + :param name: name of keyspace to create + :param strategy_class: keyspace replication strategy class + :param replication_factor: keyspace replication factor + :param durable_writes: 1.2 only, write log is bypassed if set to False + :param **replication_values: 1.2 only, additional values to ad to the replication data map + """ with connection_manager() as con: if name not in [k.name for k in con.con.client.describe_keyspaces()]: - con.execute("""CREATE KEYSPACE {} - WITH strategy_class = '{}' - AND strategy_options:replication_factor={};""".format(name, strategy_class, replication_factor)) + + try: + #Try the 1.1 method + con.execute("""CREATE KEYSPACE {} + WITH strategy_class = '{}' + AND strategy_options:replication_factor={};""".format(name, strategy_class, replication_factor)) + except CQLEngineException: + #try the 1.2 method + replication_map = { + 'class': strategy_class, + 'replication_factor':replication_factor + } + replication_map.update(replication_values) + + query = """ + CREATE KEYSPACE {} + WITH REPLICATION = {} + """.format(name, json.dumps(replication_map).replace('"', "'")) + + if strategy_class != 'SimpleStrategy': + query += " AND DURABLE_WRITES = {}".format('true' if durable_writes else 'false') + + con.execute(query) def delete_keyspace(name): with connection_manager() as con: @@ -31,8 +63,8 @@ def create_table(model, create_missing_keyspace=True): pkeys = [] qtypes = [] def add_column(col): - s = '{} {}'.format(col.db_field_name, col.db_type) - if col.primary_key: pkeys.append(col.db_field_name) + s = '"{}" {}'.format(col.db_field_name, 
col.db_type) + if col.primary_key: pkeys.append('"{}"'.format(col.db_field_name)) qtypes.append(s) for name, col in model._columns.items(): add_column(col) @@ -42,7 +74,7 @@ def add_column(col): qs += ['({})'.format(', '.join(qtypes))] # add read_repair_chance - qs += ["WITH read_repair_chance = {}".format(model.read_repair_chance)] + qs += ['WITH read_repair_chance = {}'.format(model.read_repair_chance)] qs = ' '.join(qs) con.execute(qs) From daadbeb364c59998fd543d7e789bd0f171202209 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 23 Feb 2013 10:12:34 -0800 Subject: [PATCH 0103/3726] adding workaround for detecting existing tables, since 1.2 won't return the column family names with the describe keyspace method --- cqlengine/management.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 7b4e52abe2..28da51c868 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -15,7 +15,6 @@ def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, """ with connection_manager() as con: if name not in [k.name for k in con.con.client.describe_keyspaces()]: - try: #Try the 1.1 method con.execute("""CREATE KEYSPACE {} @@ -77,7 +76,13 @@ def add_column(col): qs += ['WITH read_repair_chance = {}'.format(model.read_repair_chance)] qs = ' '.join(qs) - con.execute(qs) + try: + con.execute(qs) + except CQLEngineException as ex: + # 1.2 doesn't return cf names, so we have to examine the exception + # and ignore if it says the column family already exists + if "Cannot add already existing column family" not in unicode(ex): + raise #get existing index names ks_info = con.con.client.describe_keyspace(model.keyspace) From 7fce1cda23d8a402fef5767e6a166ae309ee3597 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 23 Feb 2013 11:03:59 -0800 Subject: [PATCH 0104/3726] fixing index creation to deal with broken schema discovery in cassandra 1.2 --- 
cqlengine/management.py | 13 ++++++++--- cqlengine/query.py | 10 ++++----- cqlengine/tests/model/test_model_io.py | 30 ++++++++++++++++++++++++++ cqlengine/tests/query/test_queryset.py | 4 ++-- 4 files changed, 47 insertions(+), 10 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 28da51c868..2b45622129 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -84,7 +84,7 @@ def add_column(col): if "Cannot add already existing column family" not in unicode(ex): raise - #get existing index names + #get existing index names, skip ones that already exist ks_info = con.con.client.describe_keyspace(model.keyspace) cf_defs = [cf for cf in ks_info.cf_defs if cf.name == raw_cf_name] idx_names = [i.index_name for i in cf_defs[0].column_metadata] if cf_defs else [] @@ -96,9 +96,16 @@ def add_column(col): if column.db_index_name in idx_names: continue qs = ['CREATE INDEX {}'.format(column.db_index_name)] qs += ['ON {}'.format(cf_name)] - qs += ['({})'.format(column.db_field_name)] + qs += ['("{}")'.format(column.db_field_name)] qs = ' '.join(qs) - con.execute(qs) + + try: + con.execute(qs) + except CQLEngineException as ex: + # 1.2 doesn't return cf names, so we have to examine the exception + # and ignore if it says the index already exists + if "Index already exists" not in unicode(ex): + raise def delete_table(model): diff --git a/cqlengine/query.py b/cqlengine/query.py index e75f563382..0b1973f7ef 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -39,7 +39,7 @@ def cql(self): Returns this operator's portion of the WHERE clause :param valname: the dict key that this operator's compare value will be found in """ - return '{} {} :{}'.format(self.column.db_field_name, self.cql_symbol, self.identifier) + return '"{}" {} :{}'.format(self.column.db_field_name, self.cql_symbol, self.identifier) def validate_operator(self): """ @@ -205,7 +205,7 @@ def _select_query(self): fields = [f for f in fields if f in self._only_fields] 
db_fields = [self.model._columns[f].db_field_name for f in fields] - qs = ['SELECT {}'.format(', '.join(db_fields))] + qs = ['SELECT {}'.format(', '.join(['"{}"'.format(f) for f in db_fields]))] qs += ['FROM {}'.format(self.column_family_name)] if self._where: @@ -389,7 +389,7 @@ def order_by(self, colname): "Can't order by the first primary key, clustering (secondary) keys only") clone = copy.deepcopy(self) - clone._order = '{} {}'.format(column.db_field_name, order_type) + clone._order = '"{}" {}'.format(column.db_field_name, order_type) return clone def count(self): @@ -478,7 +478,7 @@ def save(self, instance): field_names = zip(*value_pairs)[0] field_values = dict(value_pairs) qs = ["INSERT INTO {}".format(self.column_family_name)] - qs += ["({})".format(', '.join(field_names))] + qs += ["({})".format(', '.join(['"{}"'.format(f) for f in field_names]))] qs += ['VALUES'] qs += ["({})".format(', '.join([':'+f for f in field_names]))] qs = ' '.join(qs) @@ -492,7 +492,7 @@ def save(self, instance): del_fields = [self.model._columns[f] for f in deleted] del_fields = [f.db_field_name for f in del_fields if not f.primary_key] pks = self.model._primary_keys - qs = ['DELETE {}'.format(', '.join(del_fields))] + qs = ['DELETE {}'.format(', '.join(['"{}"'.format(f) for f in del_fields]))] qs += ['FROM {}'.format(self.column_family_name)] qs += ['WHERE'] eq = lambda col: '{0} = :{0}'.format(v.column.db_field_name) diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index 68dc18c340..e24da14175 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -71,3 +71,33 @@ def test_a_sensical_error_is_raised_if_you_try_to_create_a_table_twice(self): create_table(TestModel) create_table(TestModel) +class IndexDefinitionModel(Model): + key = columns.UUID(primary_key=True) + val = columns.Text(index=True) + +class TestIndexedColumnDefinition(BaseCassEngTestCase): + + def 
test_exception_isnt_raised_if_an_index_is_defined_more_than_once(self): + create_table(IndexDefinitionModel) + create_table(IndexDefinitionModel) + +class ReservedWordModel(Model): + token = columns.Text(primary_key=True) + insert = columns.Integer(index=True) + +class TestQueryQuoting(BaseCassEngTestCase): + + def test_reserved_cql_words_can_be_used_as_column_names(self): + """ + """ + create_table(ReservedWordModel) + + model1 = ReservedWordModel.create(token='1', insert=5) + + model2 = ReservedWordModel.filter(token=1) + + assert len(model2) == 1 + assert model1.token == model2[0].token + assert model1.insert == model2[0].insert + + diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 31178b16c5..c80a9386e9 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -55,12 +55,12 @@ def test_where_clause_generation(self): query1 = TestModel.objects(test_id=5) ids = [o.identifier for o in query1._where] where = query1._where_clause() - assert where == 'test_id = :{}'.format(*ids) + assert where == '"test_id" = :{}'.format(*ids) query2 = query1.filter(expected_result__gte=1) ids = [o.identifier for o in query2._where] where = query2._where_clause() - assert where == 'test_id = :{} AND expected_result >= :{}'.format(*ids) + assert where == '"test_id" = :{} AND "expected_result" >= :{}'.format(*ids) def test_querystring_generation(self): From f37ee97f2e06f97815c0fdd059798b5f3e121ac0 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 23 Feb 2013 12:51:46 -0800 Subject: [PATCH 0105/3726] adding support for the allow filtering flag in cassandra 1.2 --- cqlengine/columns.py | 3 +++ cqlengine/models.py | 4 ++++ cqlengine/query.py | 25 +++++++++++++++++++++++++ cqlengine/tests/query/test_queryset.py | 6 +++++- 4 files changed, 37 insertions(+), 1 deletion(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 2f6ee4f4ac..b9d3d7c136 100644 --- a/cqlengine/columns.py +++ 
b/cqlengine/columns.py @@ -59,6 +59,9 @@ def __init__(self, primary_key=False, index=False, db_field=None, default=None, self.default = default self.required = required + #only the model meta class should touch this + self._partition_key = False + #the column name in the model definition self.column_name = None diff --git a/cqlengine/models.py b/cqlengine/models.py index 45475bfa03..07a7b47900 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -118,6 +118,7 @@ def __new__(cls, name, bases, attrs): column_dict = OrderedDict() primary_keys = OrderedDict() pk_name = None + primary_key = None #get inherited properties inherited_columns = OrderedDict() @@ -158,6 +159,8 @@ def _transform_column(col_name, col_obj): for k,v in column_definitions: if pk_name is None and v.primary_key: pk_name = k + primary_key = v + v._partition_key = True _transform_column(k,v) #setup primary key shortcut @@ -191,6 +194,7 @@ def _transform_column(col_name, col_obj): attrs['_defined_columns'] = defined_columns attrs['_db_map'] = db_map attrs['_pk_name'] = pk_name + attrs['_primary_key'] = primary_key attrs['_dynamic_columns'] = {} #create the class and add a QuerySet to it diff --git a/cqlengine/query.py b/cqlengine/query.py index 0b1973f7ef..d76e14c004 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -129,6 +129,8 @@ def __init__(self, model): #ordering arguments self._order = None + self._allow_filtering = False + #CQL has a default limit of 10000, it's defined here #because explicit is better than implicit self._limit = 10000 @@ -180,6 +182,14 @@ def _validate_where_syntax(self): equal_ops = [w for w in self._where if isinstance(w, EqualsOperator)] if not any([w.column.primary_key or w.column.index for w in equal_ops]): raise QueryException('Where clauses require either a "=" or "IN" comparison with either a primary key or indexed field') + + if not self._allow_filtering: + #if the query is not on an indexed field + if not any([w.column.index for w in equal_ops]): + 
if not any([w.column._partition_key for w in equal_ops]): + raise QueryException('Filtering on a clustering key without a partition key is not allowed unless allow_filtering() is called on the querset') + + #TODO: abuse this to see if we can get cql to raise an exception def _where_clause(self): @@ -217,6 +227,9 @@ def _select_query(self): if self._limit: qs += ['LIMIT {}'.format(self._limit)] + if self._allow_filtering: + qs += ['ALLOW FILTERING'] + return ' '.join(qs) #----Reads------ @@ -400,6 +413,9 @@ def count(self): qs += ['FROM {}'.format(self.column_family_name)] if self._where: qs += ['WHERE {}'.format(self._where_clause())] + if self._allow_filtering: + qs += ['ALLOW FILTERING'] + qs = ' '.join(qs) with connection_manager() as con: @@ -425,6 +441,15 @@ def limit(self, v): clone._limit = v return clone + def allow_filtering(self): + """ + Enables the unwise practive of querying on a clustering + key without also defining a partition key + """ + clone = copy.deepcopy(self) + clone._allow_filtering = True + return clone + def _only_or_defer(self, action, fields): clone = copy.deepcopy(self) if clone._defer_fields or clone._only_fields: diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index c80a9386e9..3eff054f7b 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -165,7 +165,7 @@ def test_iteration(self): compare_set.remove(val) assert len(compare_set) == 0 - q = TestModel.objects(attempt_id=3) + q = TestModel.objects(attempt_id=3).allow_filtering() assert len(q) == 3 #tuple of expected test_id, expected_result values compare_set = set([(0,20), (1,20), (2,75)]) @@ -241,6 +241,10 @@ def test_get_multipleobjects_exception(self): with self.assertRaises(TestModel.MultipleObjectsReturned): TestModel.objects.get(test_id=1) + def test_allow_filtering_flag(self): + """ + """ + class TestQuerySetOrdering(BaseQuerySetUsage): From 584c439434ebe93253fdc39ae098133c0f7db6ee Mon 
Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 23 Feb 2013 13:35:32 -0800 Subject: [PATCH 0106/3726] updating delete table to work with Cassandra 1.2 --- cqlengine/management.py | 9 +++++---- cqlengine/tests/management/test_management.py | 16 +++++++++++++--- cqlengine/tests/query/test_datetime_queries.py | 1 + 3 files changed, 19 insertions(+), 7 deletions(-) create mode 100644 cqlengine/tests/query/test_datetime_queries.py diff --git a/cqlengine/management.py b/cqlengine/management.py index 2b45622129..20ec5741f3 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -109,11 +109,12 @@ def add_column(col): def delete_table(model): - #check that model exists cf_name = model.column_family_name() - raw_cf_name = model.column_family_name(include_keyspace=False) with connection_manager() as con: - ks_info = con.con.client.describe_keyspace(model.keyspace) - if any([raw_cf_name == cf.name for cf in ks_info.cf_defs]): + try: con.execute('drop table {};'.format(cf_name)) + except CQLEngineException as ex: + #don't freak out if the table doesn't exist + if 'Cannot drop non existing column family' not in unicode(ex): + raise diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index f3862a9527..ac542bb8e6 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -1,9 +1,11 @@ +from cqlengine.management import create_table, delete_table from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.connection import ConnectionPool from mock import Mock from cqlengine import management +from cqlengine.tests.query.test_queryset import TestModel class ConnectionPoolTestCase(BaseCassEngTestCase): @@ -42,6 +44,14 @@ class CreateKeyspaceTest(BaseCassEngTestCase): def test_create_succeeeds(self): management.create_keyspace('test_keyspace') management.delete_keyspace('test_keyspace') - - - \ No newline at end of file + +class 
DeleteTableTest(BaseCassEngTestCase): + + def test_multiple_deletes_dont_fail(self): + """ + + """ + create_table(TestModel) + + delete_table(TestModel) + delete_table(TestModel) diff --git a/cqlengine/tests/query/test_datetime_queries.py b/cqlengine/tests/query/test_datetime_queries.py new file mode 100644 index 0000000000..f6150a6d76 --- /dev/null +++ b/cqlengine/tests/query/test_datetime_queries.py @@ -0,0 +1 @@ +__author__ = 'bdeggleston' From a20a26340787676847ef5a58e1d43602f6b89874 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 23 Feb 2013 13:35:59 -0800 Subject: [PATCH 0107/3726] fixing datetime query bug --- cqlengine/query.py | 2 +- .../tests/query/test_datetime_queries.py | 48 ++++++++++++++++++- cqlengine/tests/query/test_queryset.py | 2 - 3 files changed, 48 insertions(+), 4 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index d76e14c004..840d7b2fe0 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -74,7 +74,7 @@ def get_dict(self): this should return the dict: {'colval':} SELECT * FROM column_family WHERE colname=:colval """ - return {self.identifier: self.value} + return {self.identifier: self.column.to_database(self.value)} @classmethod def get_operator(cls, symbol): diff --git a/cqlengine/tests/query/test_datetime_queries.py b/cqlengine/tests/query/test_datetime_queries.py index f6150a6d76..044e12328c 100644 --- a/cqlengine/tests/query/test_datetime_queries.py +++ b/cqlengine/tests/query/test_datetime_queries.py @@ -1 +1,47 @@ -__author__ = 'bdeggleston' +from datetime import datetime, timedelta +from uuid import uuid4 + +from cqlengine.tests.base import BaseCassEngTestCase + +from cqlengine.exceptions import ModelException +from cqlengine.management import create_table +from cqlengine.management import delete_table +from cqlengine.models import Model +from cqlengine import columns +from cqlengine import query + +class DateTimeQueryTestModel(Model): + user = columns.Integer(primary_key=True) + day = 
columns.DateTime(primary_key=True) + data = columns.Text() + +class TestDateTimeQueries(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestDateTimeQueries, cls).setUpClass() + create_table(DateTimeQueryTestModel) + + cls.base_date = datetime.now() - timedelta(days=10) + for x in range(7): + for y in range(10): + DateTimeQueryTestModel.create( + user=x, + day=(cls.base_date+timedelta(days=y)), + data=str(uuid4()) + ) + + + @classmethod + def tearDownClass(cls): + super(TestDateTimeQueries, cls).tearDownClass() + delete_table(DateTimeQueryTestModel) + + def test_range_query(self): + """ Tests that loading from a range of dates works properly """ + start = datetime(*self.base_date.timetuple()[:3]) + end = start + timedelta(days=3) + + results = DateTimeQueryTestModel.filter(user=0, day__gte=start, day__lt=end) + assert len(results) == 3 + diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 3eff054f7b..cf4eb9d1b0 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -245,7 +245,6 @@ def test_allow_filtering_flag(self): """ """ - class TestQuerySetOrdering(BaseQuerySetUsage): def test_order_by_success_case(self): @@ -392,4 +391,3 @@ def test_conn_is_returned_after_queryset_is_garbage_collected(self): - From 64a68e9cf9e045cd64a97a00db2750be02971a1e Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 24 Feb 2013 19:43:51 -0800 Subject: [PATCH 0108/3726] fixing date time precision, and using utc time when turning the cal timestamp into a datetime --- cqlengine/columns.py | 5 +++-- cqlengine/tests/query/test_datetime_queries.py | 10 ++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index b9d3d7c136..15a02748d7 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -190,13 +190,14 @@ def __init__(self, **kwargs): def to_python(self, value): if isinstance(value, datetime): return 
value - return datetime.fromtimestamp(value) + return datetime.utcfromtimestamp(value) def to_database(self, value): value = super(DateTime, self).to_database(value) if not isinstance(value, datetime): raise ValidationError("'{}' is not a datetime object".format(value)) - return value.strftime('%Y-%m-%d %H:%M:%S') + epoch = datetime(1970, 1, 1) + return long((value - epoch).total_seconds() * 1000) class UUID(Column): """ diff --git a/cqlengine/tests/query/test_datetime_queries.py b/cqlengine/tests/query/test_datetime_queries.py index 044e12328c..e374bd7d18 100644 --- a/cqlengine/tests/query/test_datetime_queries.py +++ b/cqlengine/tests/query/test_datetime_queries.py @@ -45,3 +45,13 @@ def test_range_query(self): results = DateTimeQueryTestModel.filter(user=0, day__gte=start, day__lt=end) assert len(results) == 3 + def test_datetime_precision(self): + """ Tests that millisecond resolution is preserved when saving datetime objects """ + now = datetime.now() + pk = 1000 + obj = DateTimeQueryTestModel.create(user=pk, day=now, data='energy cheese') + load = DateTimeQueryTestModel.get(user=pk) + + assert abs(now - load.day).total_seconds() < 0.001 + obj.delete() + From b2e40c1b97b313e6d66a97bd075374bcda23f7b7 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 1 Mar 2013 07:36:28 -0800 Subject: [PATCH 0109/3726] updating changelog --- changelog | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/changelog b/changelog index 153d5617cd..aa066984d3 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,11 @@ CHANGELOG +0.1.2 (in progress) +* adding support for allow filtering flag +* updating management functions to work with cassandra 1.2 +* fixed a bug querying datetimes +* modifying datetime serialization to preserver millisecond accuracy + 0.1.1 * fixed a bug occurring when creating tables from the REPL From 320682c76de1db16545771e3ad3e7452c5d33906 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 1 Mar 2013 08:00:05 -0800 Subject: [PATCH 0110/3726] adding 
support for cal helper functions --- cqlengine/functions.py | 48 ++++++++++++++++++++ cqlengine/models.py | 1 + cqlengine/query.py | 6 ++- cqlengine/tests/query/test_queryoperators.py | 32 +++++++++++++ 4 files changed, 86 insertions(+), 1 deletion(-) create mode 100644 cqlengine/functions.py diff --git a/cqlengine/functions.py b/cqlengine/functions.py new file mode 100644 index 0000000000..f2f6166185 --- /dev/null +++ b/cqlengine/functions.py @@ -0,0 +1,48 @@ +from datetime import datetime + +from cqlengine.exceptions import ValidationError + +class BaseQueryFunction(object): + """ + Base class for filtering functions. Subclasses of these classes can + be passed into .filter() and will be translated into CQL functions in + the resulting query + """ + + _cql_string = None + + def __init__(self, value): + self.value = value + + def to_cql(self, value_id): + """ + Returns a function for cql with the value id as it's argument + """ + return self._cql_string.format(value_id) + +class MinTimeUUID(BaseQueryFunction): + + _cql_string = 'MinTimeUUID(:{})' + + def __init__(self, value): + """ + :param value: the time to create a maximum time uuid from + :type value: datetime + """ + if not isinstance(value, datetime): + raise ValidationError('datetime instance is required') + super(MinTimeUUID, self).__init__(value) + +class MaxTimeUUID(BaseQueryFunction): + + _cql_string = 'MaxTimeUUID(:{})' + + def __init__(self, value): + """ + :param value: the time to create a minimum time uuid from + :type value: datetime + """ + if not isinstance(value, datetime): + raise ValidationError('datetime instance is required') + super(MaxTimeUUID, self).__init__(value) + diff --git a/cqlengine/models.py b/cqlengine/models.py index 07a7b47900..a856c59f2f 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -3,6 +3,7 @@ from cqlengine import columns from cqlengine.exceptions import ModelException +from cqlengine.functions import BaseQueryFunction from cqlengine.query import QuerySet, 
QueryException class ModelDefinitionException(ModelException): pass diff --git a/cqlengine/query.py b/cqlengine/query.py index 840d7b2fe0..94c4544d5b 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -5,6 +5,7 @@ from cqlengine.connection import connection_manager from cqlengine.exceptions import CQLEngineException +from cqlengine.functions import BaseQueryFunction #CQL 3 reference: #http://www.datastax.com/docs/1.1/references/cql/index @@ -39,7 +40,10 @@ def cql(self): Returns this operator's portion of the WHERE clause :param valname: the dict key that this operator's compare value will be found in """ - return '"{}" {} :{}'.format(self.column.db_field_name, self.cql_symbol, self.identifier) + if isinstance(self.value, BaseQueryFunction): + return '"{}" {} {}'.format(self.column.db_field_name, self.cql_symbol, self.value.to_cql(self.identifier)) + else: + return '"{}" {} :{}'.format(self.column.db_field_name, self.cql_symbol, self.identifier) def validate_operator(self): """ diff --git a/cqlengine/tests/query/test_queryoperators.py b/cqlengine/tests/query/test_queryoperators.py index e69de29bb2..62ffea9cb3 100644 --- a/cqlengine/tests/query/test_queryoperators.py +++ b/cqlengine/tests/query/test_queryoperators.py @@ -0,0 +1,32 @@ +from datetime import datetime + +from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine import columns +from cqlengine import functions +from cqlengine import query + +class TestQuerySetOperation(BaseCassEngTestCase): + + def test_maxtimeuuid_function(self): + """ + Tests that queries with helper functions are generated properly + """ + now = datetime.now() + col = columns.DateTime() + col.set_column_name('time') + qry = query.EqualsOperator(col, functions.MaxTimeUUID(now)) + + assert qry.cql == '"time" = MaxTimeUUID(:{})'.format(qry.identifier) + + def test_mintimeuuid_function(self): + """ + Tests that queries with helper functions are generated properly + """ + now = datetime.now() + col = columns.DateTime() + 
col.set_column_name('time') + qry = query.EqualsOperator(col, functions.MinTimeUUID(now)) + + assert qry.cql == '"time" = MinTimeUUID(:{})'.format(qry.identifier) + + From b4e09a324c9ba89b142d2ff764a851c69da635dc Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 2 Mar 2013 17:13:51 -0800 Subject: [PATCH 0111/3726] testing min and max timeuuid functions with related debugging --- cqlengine/functions.py | 11 +++++ cqlengine/query.py | 5 +- cqlengine/tests/query/test_queryoperators.py | 4 +- cqlengine/tests/query/test_queryset.py | 50 ++++++++++++++++++++ 4 files changed, 68 insertions(+), 2 deletions(-) diff --git a/cqlengine/functions.py b/cqlengine/functions.py index f2f6166185..15e93d4659 100644 --- a/cqlengine/functions.py +++ b/cqlengine/functions.py @@ -20,6 +20,9 @@ def to_cql(self, value_id): """ return self._cql_string.format(value_id) + def get_value(self): + raise NotImplementedError + class MinTimeUUID(BaseQueryFunction): _cql_string = 'MinTimeUUID(:{})' @@ -33,6 +36,10 @@ def __init__(self, value): raise ValidationError('datetime instance is required') super(MinTimeUUID, self).__init__(value) + def get_value(self): + epoch = datetime(1970, 1, 1) + return long((self.value - epoch).total_seconds() * 1000) + class MaxTimeUUID(BaseQueryFunction): _cql_string = 'MaxTimeUUID(:{})' @@ -46,3 +53,7 @@ def __init__(self, value): raise ValidationError('datetime instance is required') super(MaxTimeUUID, self).__init__(value) + def get_value(self): + epoch = datetime(1970, 1, 1) + return long((self.value - epoch).total_seconds() * 1000) + diff --git a/cqlengine/query.py b/cqlengine/query.py index 94c4544d5b..7a9a74e77a 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -78,7 +78,10 @@ def get_dict(self): this should return the dict: {'colval':} SELECT * FROM column_family WHERE colname=:colval """ - return {self.identifier: self.column.to_database(self.value)} + if isinstance(self.value, BaseQueryFunction): + return {self.identifier: 
self.column.to_database(self.value.get_value())} + else: + return {self.identifier: self.column.to_database(self.value)} @classmethod def get_operator(cls, symbol): diff --git a/cqlengine/tests/query/test_queryoperators.py b/cqlengine/tests/query/test_queryoperators.py index 62ffea9cb3..5db4a299af 100644 --- a/cqlengine/tests/query/test_queryoperators.py +++ b/cqlengine/tests/query/test_queryoperators.py @@ -1,7 +1,8 @@ from datetime import datetime +import time from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine import columns +from cqlengine import columns, Model from cqlengine import functions from cqlengine import query @@ -30,3 +31,4 @@ def test_mintimeuuid_function(self): assert qry.cql == '"time" = MinTimeUUID(:{})'.format(qry.identifier) + diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index cf4eb9d1b0..99f45afacd 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -1,6 +1,11 @@ +from datetime import datetime +import time +from uuid import uuid1, uuid4 + from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.exceptions import ModelException +from cqlengine import functions from cqlengine.management import create_table from cqlengine.management import delete_table from cqlengine.models import Model @@ -386,6 +391,51 @@ def test_conn_is_returned_after_queryset_is_garbage_collected(self): del q assert ConnectionPool._queue.qsize() == 1 +class TimeUUIDQueryModel(Model): + partition = columns.UUID(primary_key=True) + time = columns.TimeUUID(primary_key=True) + data = columns.Text(required=False) + +class TestMinMaxTimeUUIDFunctions(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestMinMaxTimeUUIDFunctions, cls).setUpClass() + create_table(TimeUUIDQueryModel) + + @classmethod + def tearDownClass(cls): + super(TestMinMaxTimeUUIDFunctions, cls).tearDownClass() + delete_table(TimeUUIDQueryModel) + + def 
test_success_case(self): + """ Test that the min and max time uuid functions work as expected """ + pk = uuid4() + TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='1') + time.sleep(0.2) + TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='2') + time.sleep(0.2) + midpoint = datetime.utcnow() + time.sleep(0.2) + TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='3') + time.sleep(0.2) + TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='4') + time.sleep(0.2) + + q = TimeUUIDQueryModel.filter(partition=pk, time__lte=functions.MaxTimeUUID(midpoint)) + q = [d for d in q] + assert len(q) == 2 + datas = [d.data for d in q] + assert '1' in datas + assert '2' in datas + + q = TimeUUIDQueryModel.filter(partition=pk, time__gte=functions.MinTimeUUID(midpoint)) + assert len(q) == 2 + datas = [d.data for d in q] + assert '3' in datas + assert '4' in datas + + From 684501de389f016fad9b9ee1fb352ea0a6c99abb Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 2 Mar 2013 19:31:11 -0800 Subject: [PATCH 0112/3726] updating changelog --- changelog | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/changelog b/changelog index aa066984d3..c5f26914a5 100644 --- a/changelog +++ b/changelog @@ -4,7 +4,8 @@ CHANGELOG * adding support for allow filtering flag * updating management functions to work with cassandra 1.2 * fixed a bug querying datetimes -* modifying datetime serialization to preserver millisecond accuracy +* modifying datetime serialization to preserve millisecond accuracy +* adding cql function call generators MaxTimeUUID and MinTimeUUID 0.1.1 * fixed a bug occurring when creating tables from the REPL From 9aba6e469e27692fde5173d9d21c729236d22c9a Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 5 Mar 2013 21:26:15 -0800 Subject: [PATCH 0113/3726] adding container columns --- cqlengine/columns.py | 169 +++++++++++++++++- cqlengine/management.py | 2 +- .../tests/columns/test_container_columns.py | 116 ++++++++++++ 
3 files changed, 284 insertions(+), 3 deletions(-) create mode 100644 cqlengine/tests/columns/test_container_columns.py diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 15a02748d7..fe7ad9e348 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -1,7 +1,9 @@ #column field types +from copy import copy from datetime import datetime import re from uuid import uuid1, uuid4 +from cql.query import cql_quote from cqlengine.exceptions import ValidationError @@ -10,7 +12,7 @@ class BaseValueManager(object): def __init__(self, instance, column, value): self.instance = instance self.column = column - self.initial_value = value + self.initial_value = copy(value) self.value = value @property @@ -121,7 +123,7 @@ def get_column_def(self): """ Returns a column definition for CQL table definition """ - return '{} {}'.format(self.db_field_name, self.db_type) + return '"{}" {}'.format(self.db_field_name, self.db_type) def set_column_name(self, name): """ @@ -156,6 +158,7 @@ def __init__(self, *args, **kwargs): def validate(self, value): value = super(Text, self).validate(value) + if value is None: return if not isinstance(value, (basestring, bytearray)) and value is not None: raise ValidationError('{} is not a string'.format(type(value))) if self.max_length: @@ -171,6 +174,7 @@ class Integer(Column): def validate(self, value): val = super(Integer, self).validate(value) + if val is None: return try: return long(val) except (TypeError, ValueError): @@ -212,6 +216,7 @@ def __init__(self, default=lambda:uuid4(), **kwargs): def validate(self, value): val = super(UUID, self).validate(value) + if val is None: return from uuid import UUID as _UUID if isinstance(val, _UUID): return val if not self.re_uuid.match(val): @@ -246,6 +251,8 @@ def __init__(self, double_precision=True, **kwargs): super(Float, self).__init__(**kwargs) def validate(self, value): + value = super(Float, self).validate(value) + if value is None: return try: return float(value) except (TypeError, 
ValueError): @@ -266,3 +273,161 @@ def __init__(self, **kwargs): super(Counter, self).__init__(**kwargs) raise NotImplementedError +class ContainerValueManager(BaseValueManager): + pass + +class ContainerQuoter(object): + """ + contains a single value, which will quote itself for CQL insertion statements + """ + def __init__(self, value): + self.value = value + + def __str__(self): + raise NotImplementedError + +class BaseContainerColumn(Column): + """ + Base Container type + """ + + def __init__(self, value_type, **kwargs): + """ + :param value_type: a column class indicating the types of the value + """ + if not issubclass(value_type, Column): + raise ValidationError('value_type must be a column class') + if issubclass(value_type, BaseContainerColumn): + raise ValidationError('container types cannot be nested') + if value_type.db_type is None: + raise ValidationError('value_type cannot be an abstract column type') + + self.value_type = value_type + self.value_col = self.value_type() + super(BaseContainerColumn, self).__init__(**kwargs) + + def get_column_def(self): + """ + Returns a column definition for CQL table definition + """ + db_type = self.db_type.format(self.value_type.db_type) + return '{} {}'.format(self.db_field_name, db_type) + +class Set(BaseContainerColumn): + """ + Stores a set of unordered, unique values + + http://www.datastax.com/docs/1.2/cql_cli/using/collections + """ + db_type = 'set<{}>' + + class Quoter(ContainerQuoter): + + def __str__(self): + cq = cql_quote + return '{' + ', '.join([cq(v) for v in self.value]) + '}' + + def __init__(self, value_type, strict=True, **kwargs): + """ + :param value_type: a column class indicating the types of the value + :param strict: sets whether non set values will be coerced to set + type on validation, or raise a validation error, defaults to True + """ + self.strict = strict + super(Set, self).__init__(value_type, **kwargs) + + def validate(self, value): + val = super(Set, self).validate(value) + if 
val is None: return + types = (set,) if self.strict else (set, list, tuple) + if not isinstance(val, types): + if self.strict: + raise ValidationError('{} is not a set object'.format(val)) + else: + raise ValidationError('{} cannot be coerced to a set object'.format(val)) + + return {self.value_col.validate(v) for v in val} + + def to_database(self, value): + return self.Quoter({self.value_col.to_database(v) for v in value}) + +class List(BaseContainerColumn): + """ + Stores a list of ordered values + + http://www.datastax.com/docs/1.2/cql_cli/using/collections_list + """ + db_type = 'list<{}>' + + class Quoter(ContainerQuoter): + + def __str__(self): + cq = cql_quote + return '[' + ', '.join([cq(v) for v in self.value]) + ']' + + def validate(self, value): + val = super(List, self).validate(value) + if val is None: return + if not isinstance(val, (set, list, tuple)): + raise ValidationError('{} is not a list object'.format(val)) + return [self.value_col.validate(v) for v in val] + + def to_database(self, value): + return self.Quoter([self.value_col.to_database(v) for v in value]) + +class Map(BaseContainerColumn): + """ + Stores a key -> value map (dictionary) + + http://www.datastax.com/docs/1.2/cql_cli/using/collections_map + """ + + db_type = 'map<{}, {}>' + + class Quoter(ContainerQuoter): + + def __str__(self): + cq = cql_quote + return '{' + ', '.join([cq(k) + ':' + cq(v) for k,v in self.value.items()]) + '}' + + def __init__(self, key_type, value_type, **kwargs): + """ + :param key_type: a column class indicating the types of the key + :param value_type: a column class indicating the types of the value + """ + if not issubclass(value_type, Column): + raise ValidationError('key_type must be a column class') + if issubclass(value_type, BaseContainerColumn): + raise ValidationError('container types cannot be nested') + if key_type.db_type is None: + raise ValidationError('key_type cannot be an abstract column type') + + self.key_type = key_type + self.key_col 
= self.key_type() + super(Map, self).__init__(value_type, **kwargs) + + def get_column_def(self): + """ + Returns a column definition for CQL table definition + """ + db_type = self.db_type.format( + self.key_type.db_type, + self.value_type.db_type + ) + return '{} {}'.format(self.db_field_name, db_type) + + def validate(self, value): + val = super(Map, self).validate(value) + if val is None: return + if not isinstance(val, dict): + raise ValidationError('{} is not a dict object'.format(val)) + return {self.key_col.validate(k):self.value_col.validate(v) for k,v in val.items()} + + def to_python(self, value): + if value is not None: + return {self.key_col.to_python(k):self.value_col.to_python(v) for k,v in value.items()} + + def to_database(self, value): + return self.Quoter({self.key_col.to_database(k):self.value_col.to_database(v) for k,v in value.items()}) + + diff --git a/cqlengine/management.py b/cqlengine/management.py index 20ec5741f3..ba8b8145f7 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -62,7 +62,7 @@ def create_table(model, create_missing_keyspace=True): pkeys = [] qtypes = [] def add_column(col): - s = '"{}" {}'.format(col.db_field_name, col.db_type) + s = col.get_column_def() if col.primary_key: pkeys.append('"{}"'.format(col.db_field_name)) qtypes.append(s) for name, col in model._columns.items(): diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py new file mode 100644 index 0000000000..064a7cc32d --- /dev/null +++ b/cqlengine/tests/columns/test_container_columns.py @@ -0,0 +1,116 @@ +from datetime import datetime, timedelta +from uuid import uuid4 + +from cqlengine import Model +from cqlengine import columns +from cqlengine.management import create_table, delete_table +from cqlengine.tests.base import BaseCassEngTestCase + +class TestSetModel(Model): + partition = columns.UUID(primary_key=True, default=uuid4) + int_set = columns.Set(columns.Integer, 
required=False) + text_set = columns.Set(columns.Text, required=False) + +class TestSetColumn(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestSetColumn, cls).setUpClass() + delete_table(TestSetModel) + create_table(TestSetModel) + + @classmethod + def tearDownClass(cls): + super(TestSetColumn, cls).tearDownClass() + delete_table(TestSetModel) + + def test_io_success(self): + """ Tests that a basic usage works as expected """ + m1 = TestSetModel.create(int_set={1,2}, text_set={'kai', 'andreas'}) + m2 = TestSetModel.get(partition=m1.partition) + + assert isinstance(m2.int_set, set) + assert isinstance(m2.text_set, set) + + assert 1 in m2.int_set + assert 2 in m2.int_set + + assert 'kai' in m2.text_set + assert 'andreas' in m2.text_set + + +class TestListModel(Model): + partition = columns.UUID(primary_key=True, default=uuid4) + int_list = columns.List(columns.Integer, required=False) + text_list = columns.List(columns.Text, required=False) + +class TestListColumn(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestListColumn, cls).setUpClass() + delete_table(TestListModel) + create_table(TestListModel) + + @classmethod + def tearDownClass(cls): + super(TestListColumn, cls).tearDownClass() + delete_table(TestListModel) + + def test_io_success(self): + """ Tests that a basic usage works as expected """ + m1 = TestListModel.create(int_list=[1,2], text_list=['kai', 'andreas']) + m2 = TestListModel.get(partition=m1.partition) + + assert isinstance(m2.int_list, tuple) + assert isinstance(m2.text_list, tuple) + + assert len(m2.int_list) == 2 + assert len(m2.text_list) == 2 + + assert m2.int_list[0] == 1 + assert m2.int_list[1] == 2 + + assert m2.text_list[0] == 'kai' + assert m2.text_list[1] == 'andreas' + + +class TestMapModel(Model): + partition = columns.UUID(primary_key=True, default=uuid4) + int_map = columns.Map(columns.Integer, columns.UUID, required=False) + text_map = columns.Map(columns.Text, columns.DateTime, 
required=False) + +class TestMapColumn(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestMapColumn, cls).setUpClass() + delete_table(TestMapModel) + create_table(TestMapModel) + + @classmethod + def tearDownClass(cls): + super(TestMapColumn, cls).tearDownClass() + delete_table(TestMapModel) + + def test_io_success(self): + """ Tests that a basic usage works as expected """ + k1 = uuid4() + k2 = uuid4() + now = datetime.now() + then = now + timedelta(days=1) + m1 = TestMapModel.create(int_map={1:k1,2:k2}, text_map={'now':now, 'then':then}) + m2 = TestMapModel.get(partition=m1.partition) + + assert isinstance(m2.int_map, dict) + assert isinstance(m2.text_map, dict) + + assert 1 in m2.int_map + assert 2 in m2.int_map + assert m2.int_map[1] == k1 + assert m2.int_map[2] == k2 + + assert 'now' in m2.text_map + assert 'then' in m2.text_map + assert (now - m2.text_map['now']).total_seconds() < 0.001 + assert (then - m2.text_map['then']).total_seconds() < 0.001 From 7ab2585cb7e4786f56f378674ece0ac0b708de76 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 5 Mar 2013 21:47:15 -0800 Subject: [PATCH 0114/3726] adding tests around container column type validation --- .../tests/columns/test_container_columns.py | 23 ++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 064a7cc32d..4a1721e078 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -1,7 +1,7 @@ from datetime import datetime, timedelta from uuid import uuid4 -from cqlengine import Model +from cqlengine import Model, ValidationError from cqlengine import columns from cqlengine.management import create_table, delete_table from cqlengine.tests.base import BaseCassEngTestCase @@ -38,6 +38,13 @@ def test_io_success(self): assert 'kai' in m2.text_set assert 'andreas' in m2.text_set + def 
test_type_validation(self): + """ + Tests that attempting to use the wrong types will raise an exception + """ + with self.assertRaises(ValidationError): + TestSetModel.create(int_set={'string', True}, text_set={1, 3.0}) + class TestListModel(Model): partition = columns.UUID(primary_key=True, default=uuid4) @@ -74,6 +81,13 @@ def test_io_success(self): assert m2.text_list[0] == 'kai' assert m2.text_list[1] == 'andreas' + def test_type_validation(self): + """ + Tests that attempting to use the wrong types will raise an exception + """ + with self.assertRaises(ValidationError): + TestListModel.create(int_list=['string', True], text_list=[1, 3.0]) + class TestMapModel(Model): partition = columns.UUID(primary_key=True, default=uuid4) @@ -114,3 +128,10 @@ def test_io_success(self): assert 'then' in m2.text_map assert (now - m2.text_map['now']).total_seconds() < 0.001 assert (then - m2.text_map['then']).total_seconds() < 0.001 + + def test_type_validation(self): + """ + Tests that attempting to use the wrong types will raise an exception + """ + with self.assertRaises(ValidationError): + TestMapModel.create(int_map={'key':2,uuid4():'val'}, text_map={2:5}) From 412412bfb8c1ed68095ac43557f6e8ba27f1e09d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 5 Mar 2013 21:47:57 -0800 Subject: [PATCH 0115/3726] fixing failing unit test --- cqlengine/tests/model/test_model_io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index e24da14175..f94b5964c2 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -94,7 +94,7 @@ def test_reserved_cql_words_can_be_used_as_column_names(self): model1 = ReservedWordModel.create(token='1', insert=5) - model2 = ReservedWordModel.filter(token=1) + model2 = ReservedWordModel.filter(token='1') assert len(model2) == 1 assert model1.token == model2[0].token From 
eb2fe8c21fa12e279124689d4f3bcb274be3ea65 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 5 Mar 2013 21:53:22 -0800 Subject: [PATCH 0116/3726] adding collection types to changelog --- changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog b/changelog index c5f26914a5..a109cfcf0f 100644 --- a/changelog +++ b/changelog @@ -1,6 +1,7 @@ CHANGELOG 0.1.2 (in progress) +* adding set, list, and map collection types * adding support for allow filtering flag * updating management functions to work with cassandra 1.2 * fixed a bug querying datetimes From f1119db1c24bdad5237987059c11646404ba7d7a Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 5 Mar 2013 22:24:11 -0800 Subject: [PATCH 0117/3726] adding functions to the base module --- cqlengine/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 1f8c70ad36..d5e7b74fcc 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,4 +1,5 @@ from cqlengine.columns import * +from cqlengine.functions import * from cqlengine.models import Model __version__ = '0.1.2' From 61235126bf84b86aa71655d5383228355b0b53df Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 5 Mar 2013 22:24:26 -0800 Subject: [PATCH 0118/3726] adding new features to the documentation --- docs/topics/columns.rst | 59 ++++++++++++++++++++++++++++++++++++++++ docs/topics/models.rst | 6 ++-- docs/topics/queryset.rst | 31 +++++++++++++++++++++ 3 files changed, 94 insertions(+), 2 deletions(-) diff --git a/docs/topics/columns.rst b/docs/topics/columns.rst index 9090b0bc0f..0537e3bccb 100644 --- a/docs/topics/columns.rst +++ b/docs/topics/columns.rst @@ -77,6 +77,65 @@ Columns columns.Decimal() +Collection Type Columns +---------------------------- + + CQLEngine also supports container column types. Each container column requires a column class argument to specify what type of objects it will hold. 
The Map column requires 2, one for the key, and the other for the value + + *Example* + + .. code-block:: python + + class Person(Model): + first_name = columns.Text() + last_name = columns.Text() + + friends = columns.Set(columns.Text) + enemies = columns.Set(columns.Text) + todo_list = columns.List(columns.Text) + birthdays = columns.Map(columns.Text, columns.DateTime) + + + +.. class:: Set() + + Stores a set of unordered, unique values. Available only with Cassandra 1.2 and above :: + + columns.Set(value_type) + + **options** + + :attr:`~columns.Set.value_type` + The type of objects the set will contain + + :attr:`~columns.Set.strict` + If True, adding this column will raise an exception during save if the value is not a python `set` instance. If False, it will attempt to coerce the value to a set. Defaults to True. + +.. class:: List() + + Stores a list of ordered values. Available only with Cassandra 1.2 and above :: + + columns.List(value_type) + + **options** + + :attr:`~columns.List.value_type` + The type of objects the set will contain + +.. class:: Map() + + Stores a map (dictionary) collection, available only with Cassandra 1.2 and above :: + + columns.Map(key_type, value_type) + + **options** + + :attr:`~columns.Map.key_type` + The type of the map keys + + :attr:`~columns.Map.value_type` + The type of the map values + Column Options ============== diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 86788a2530..0dd2c19cf8 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -50,11 +50,13 @@ Column Types * :class:`~cqlengine.columns.Integer` * :class:`~cqlengine.columns.DateTime` * :class:`~cqlengine.columns.UUID` + * :class:`~cqlengine.columns.TimeUUID` * :class:`~cqlengine.columns.Boolean` * :class:`~cqlengine.columns.Float` * :class:`~cqlengine.columns.Decimal` - - A time uuid field is in the works. 
+ * :class:`~cqlengine.columns.Set` + * :class:`~cqlengine.columns.List` + * :class:`~cqlengine.columns.Map` Column Options -------------- diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index 63b76d03a6..ce3a2c818c 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -151,6 +151,33 @@ Filtering Operators q = Automobile.objects.filter(manufacturer='Tesla') q = q.filter(year__lte=2012) # year <= 2012 + +TimeUUID Functions +================== + + In addition to querying using regular values, there are two functions you can pass in when querying TimeUUID columns to help make filtering by them easier. Note that these functions don't actually return a value, but instruct the cql interpreter to use the functions in it's query. + + .. class:: MinTimeUUID(datetime) + + returns the minimum time uuid value possible for the given datetime + + .. class:: MaxTimeUUID(datetime) + + returns the maximum time uuid value possible for the given datetime + + *Example* + + .. code-block:: python + + class DataStream(Model): + time = cqlengine.TimeUUID(primary_key=True) + data = cqlengine.Bytes() + + min_time = datetime(1982, 1, 1) + max_time = datetime(1982, 3, 9) + + DataStream.filter(time__gt=cqlengine.MinTimeUUID(min_time), time__lt=cqlengine.MaxTimeUUID(max_time)) + QuerySets are imutable ====================== @@ -223,3 +250,7 @@ QuerySet method reference :type field_name: string Sets the field to order on. + + .. 
method:: allow_filtering() + + Enables the (usually) unwise practive of querying on a clustering key without also defining a partition key From cb5945f96cc76d71f81cc878a6d20a54f47cd511 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 6 Mar 2013 20:41:13 -0800 Subject: [PATCH 0119/3726] renaming the value manager's initial_value field to previous_value --- cqlengine/columns.py | 14 ++++++++++++-- cqlengine/tests/model/test_model_io.py | 2 +- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index fe7ad9e348..67d9212af2 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -12,12 +12,22 @@ class BaseValueManager(object): def __init__(self, instance, column, value): self.instance = instance self.column = column - self.initial_value = copy(value) + self.previous_value = copy(value) self.value = value @property def deleted(self): - return self.value is None and self.initial_value is not None + return self.value is None and self.previous_value is not None + + @property + def changed(self): + """ + Indicates whether or not this value has changed. 
+ + :rtype: boolean + + """ + return self.value != self.previous_value def getval(self): return self.value diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index f94b5964c2..4c40e2beb3 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -62,7 +62,7 @@ def test_column_deleting_works_properly(self): tm2 = TestModel.objects(id=tm.pk).first() assert tm2.text is None - assert tm2._values['text'].initial_value is None + assert tm2._values['text'].previous_value is None def test_a_sensical_error_is_raised_if_you_try_to_create_a_table_twice(self): From da3895aeb35b934494a3a5be528c7e4125aa3180 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 6 Mar 2013 21:13:19 -0800 Subject: [PATCH 0120/3726] adding flags for checking if an object has been persisted and if it can be persisted with an update --- cqlengine/models.py | 23 +++++++++++++- cqlengine/query.py | 4 ++- cqlengine/tests/model/test_model_io.py | 42 ++++++++++++++++++++++++++ 3 files changed, 67 insertions(+), 2 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index a856c59f2f..101a328b25 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -32,6 +32,21 @@ def __init__(self, **values): value_mngr = column.value_manager(self, column, value) self._values[name] = value_mngr + # a flag set by the deserializer to indicate + # that update should be used when persisting changes + self._is_persisted = False + + def _can_update(self): + """ + Called by the save function to check if this should be + persisted with update or insert + + :return: + """ + if not self._is_persisted: return False + pks = self._primary_keys.keys() + return all([not self._values[k].changed for k in self._primary_keys]) + def __eq__(self, other): return self.as_dict() == other.as_dict() @@ -101,7 +116,13 @@ def save(self): is_new = self.pk is None self.validate() self.objects.save(self) - #delete any fields that have been 
deleted / set to none + + #reset the value managers + for v in self._values.values(): + v.previous_value = v.value + + self._is_persisted = True + return self def delete(self): diff --git a/cqlengine/query.py b/cqlengine/query.py index 7a9a74e77a..e4a1e84dff 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -314,7 +314,9 @@ def _construct_instance(self, values): field_dict[db_map[key]] = val else: field_dict[key] = val - return self.model(**field_dict) + instance = self.model(**field_dict) + instance._is_persisted = True + return instance def first(self): try: diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index 4c40e2beb3..707e6736c3 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -1,4 +1,5 @@ from unittest import skip +from uuid import uuid4 from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.management import create_table @@ -71,6 +72,47 @@ def test_a_sensical_error_is_raised_if_you_try_to_create_a_table_twice(self): create_table(TestModel) create_table(TestModel) +class TestCanUpdate(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestCanUpdate, cls).setUpClass() + delete_table(TestModel) + create_table(TestModel) + + @classmethod + def tearDownClass(cls): + super(TestCanUpdate, cls).tearDownClass() + delete_table(TestModel) + + def test_success_case(self): + tm = TestModel(count=8, text='123456789') + + # object hasn't been saved, + # shouldn't be able to update + assert not tm._is_persisted + assert not tm._can_update() + + tm.save() + + # object has been saved, + # should be able to update + assert tm._is_persisted + assert tm._can_update() + + tm.count = 200 + + # primary keys haven't changed, + # should still be able to update + assert tm._can_update() + + tm.id = uuid4() + + # primary keys have changed, + # should not be able to update + assert not tm._can_update() + + class IndexDefinitionModel(Model): key = 
columns.UUID(primary_key=True) val = columns.Text(index=True) From e77c056f98f638e02e459fc32ea8290110a0a5f9 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 6 Mar 2013 22:12:52 -0800 Subject: [PATCH 0121/3726] adding support for either insert or update statements on object persistence --- cqlengine/query.py | 45 +++++++++++++++++++---- cqlengine/tests/model/test_model_io.py | 49 ++++++++++++++++++++++++++ 2 files changed, 87 insertions(+), 7 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index e4a1e84dff..da95798472 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -511,14 +511,45 @@ def save(self, instance): #construct query string field_names = zip(*value_pairs)[0] field_values = dict(value_pairs) - qs = ["INSERT INTO {}".format(self.column_family_name)] - qs += ["({})".format(', '.join(['"{}"'.format(f) for f in field_names]))] - qs += ['VALUES'] - qs += ["({})".format(', '.join([':'+f for f in field_names]))] + + qs = [] + if instance._can_update(): + qs += ["UPDATE {}".format(self.column_family_name)] + qs += ["SET"] + + set_statements = [] + #get defined fields and their column names + for name, col in self.model._columns.items(): + if not col.is_primary_key: + val = values.get(name) + if val is None: continue + set_statements += ['"{0}" = :{0}'.format(col.db_field_name)] + qs += [', '.join(set_statements)] + + qs += ['WHERE'] + + where_statements = [] + for name, col in self.model._primary_keys.items(): + where_statements += ['"{0}" = :{0}'.format(col.db_field_name)] + + qs += [' AND '.join(where_statements)] + + # clear the qs if there are not set statements + if not set_statements: qs = [] + + else: + qs += ["INSERT INTO {}".format(self.column_family_name)] + qs += ["({})".format(', '.join(['"{}"'.format(f) for f in field_names]))] + qs += ['VALUES'] + qs += ["({})".format(', '.join([':'+f for f in field_names]))] + qs = ' '.join(qs) - with connection_manager() as con: - con.execute(qs, field_values) + # skip query 
execution if it's empty + # caused by pointless update queries + if qs: + with connection_manager() as con: + con.execute(qs, field_values) #delete deleted / nulled columns deleted = [k for k,v in instance._values.items() if v.deleted] @@ -529,7 +560,7 @@ def save(self, instance): qs = ['DELETE {}'.format(', '.join(['"{}"'.format(f) for f in del_fields]))] qs += ['FROM {}'.format(self.column_family_name)] qs += ['WHERE'] - eq = lambda col: '{0} = :{0}'.format(v.column.db_field_name) + eq = lambda col: '{0} = :{0}'.format(col.db_field_name) qs += [' AND '.join([eq(f) for f in pks.values()])] qs = ' '.join(qs) diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index 707e6736c3..5ced331fb3 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -1,5 +1,6 @@ from unittest import skip from uuid import uuid4 +import random from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.management import create_table @@ -72,6 +73,53 @@ def test_a_sensical_error_is_raised_if_you_try_to_create_a_table_twice(self): create_table(TestModel) create_table(TestModel) +class TestMultiKeyModel(Model): + partition = columns.Integer(primary_key=True) + cluster = columns.Integer(primary_key=True) + count = columns.Integer(required=False) + text = columns.Text(required=False) + +class TestUpdating(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestUpdating, cls).setUpClass() + delete_table(TestMultiKeyModel) + create_table(TestMultiKeyModel) + + @classmethod + def tearDownClass(cls): + super(TestUpdating, cls).tearDownClass() + delete_table(TestMultiKeyModel) + + def setUp(self): + super(TestUpdating, self).setUp() + self.instance = TestMultiKeyModel.create( + partition=random.randint(0, 1000), + cluster=random.randint(0, 1000), + count=0, + text='happy' + ) + + def test_vanilla_update(self): + self.instance.count = 5 + self.instance.save() + + check = 
TestMultiKeyModel.get(partition=self.instance.partition, cluster=self.instance.cluster) + assert check.count == 5 + assert check.text == 'happy' + + def test_deleting_only(self): + self.instance.count = None + self.instance.text = None + self.instance.save() + + check = TestMultiKeyModel.get(partition=self.instance.partition, cluster=self.instance.cluster) + assert check.count is None + assert check.text is None + + + class TestCanUpdate(BaseCassEngTestCase): @classmethod @@ -105,6 +153,7 @@ def test_success_case(self): # primary keys haven't changed, # should still be able to update assert tm._can_update() + tm.save() tm.id = uuid4() From fcae60188e8b6ce7e58ced825bfd56bc5820df3d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 6 Mar 2013 22:14:16 -0800 Subject: [PATCH 0122/3726] updating changelog --- changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog b/changelog index a109cfcf0f..00dc02bced 100644 --- a/changelog +++ b/changelog @@ -1,6 +1,7 @@ CHANGELOG 0.1.2 (in progress) +* expanding internal save function to use update where appropriate * adding set, list, and map collection types * adding support for allow filtering flag * updating management functions to work with cassandra 1.2 From 54d4b804c4f10926a10c8e7bf9c2703917aead84 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 7 Mar 2013 21:59:55 -0800 Subject: [PATCH 0123/3726] adding test around deleting multi key models --- cqlengine/tests/model/test_model_io.py | 28 ++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index 5ced331fb3..7f708cf214 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -73,12 +73,40 @@ def test_a_sensical_error_is_raised_if_you_try_to_create_a_table_twice(self): create_table(TestModel) create_table(TestModel) + class TestMultiKeyModel(Model): partition = columns.Integer(primary_key=True) cluster = 
columns.Integer(primary_key=True) count = columns.Integer(required=False) text = columns.Text(required=False) +class TestDeleting(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestDeleting, cls).setUpClass() + delete_table(TestMultiKeyModel) + create_table(TestMultiKeyModel) + + @classmethod + def tearDownClass(cls): + super(TestDeleting, cls).tearDownClass() + delete_table(TestMultiKeyModel) + + def test_deleting_only_deletes_one_object(self): + partition = random.randint(0,1000) + for i in range(5): + TestMultiKeyModel.create(partition=partition, cluster=i, count=i, text=str(i)) + + assert TestMultiKeyModel.filter(partition=partition).count() == 5 + + TestMultiKeyModel.get(partition=partition, cluster=0).delete() + + assert TestMultiKeyModel.filter(partition=partition).count() == 4 + + TestMultiKeyModel.filter(partition=partition).delete() + + class TestUpdating(BaseCassEngTestCase): @classmethod From dc6031ae9234b9c7333b89ddb0fc95923140df00 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 7 Mar 2013 22:43:42 -0800 Subject: [PATCH 0124/3726] Implementing batch queries (not tested yet) --- cqlengine/models.py | 16 ++- cqlengine/query.py | 209 +++++++++++++++++++++------- cqlengine/tests/test_batch_query.py | 0 3 files changed, 173 insertions(+), 52 deletions(-) create mode 100644 cqlengine/tests/test_batch_query.py diff --git a/cqlengine/models.py b/cqlengine/models.py index 101a328b25..ca9ee77a30 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -4,7 +4,7 @@ from cqlengine import columns from cqlengine.exceptions import ModelException from cqlengine.functions import BaseQueryFunction -from cqlengine.query import QuerySet, QueryException +from cqlengine.query import QuerySet, QueryException, DMLQuery class ModelDefinitionException(ModelException): pass @@ -112,10 +112,13 @@ def filter(cls, **kwargs): def get(cls, **kwargs): return cls.objects.get(**kwargs) - def save(self): + def save(self, batch_obj=None): is_new = 
self.pk is None self.validate() - self.objects.save(self) + if batch_obj: + DMLQuery(self.__class__, self).batch(batch_obj).save() + else: + DMLQuery(self.__class__, self).save() #reset the value managers for v in self._values.values(): @@ -127,8 +130,13 @@ def save(self): def delete(self): """ Deletes this instance """ - self.objects.delete_instance(self) + DMLQuery(self.__class__, self).delete() + def batch(self, batch_obj): + """ + Returns a batched DML query + """ + return DMLQuery(self.__class__, self).batch(batch_obj) class ModelMetaClass(type): diff --git a/cqlengine/query.py b/cqlengine/query.py index da95798472..3405f0518e 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -1,7 +1,9 @@ from collections import namedtuple import copy +from datetime import datetime from hashlib import md5 from time import time +from uuid import uuid1 from cqlengine.connection import connection_manager from cqlengine.exceptions import CQLEngineException @@ -28,7 +30,7 @@ def __init__(self, column, value): #the identifier is a unique key that will be used in string #replacement on query strings, it's created from a hash #of this object's id and the time - self.identifier = md5(str(id(self)) + str(time())).hexdigest() + self.identifier = uuid1().hex #perform validation on this operator self.validate_operator() @@ -123,6 +125,59 @@ class LessThanOrEqualOperator(QueryOperator): symbol = "LTE" cql_symbol = '<=' +class Consistency(object): + ANY = 'ANY' + ONE = 'ONE' + QUORUM = 'QUORUM' + LOCAL_QUORUM = 'LOCAL_QUORUM' + EACH_QUORUM = 'EACH_QUORUM' + ALL = 'ALL' + +class BatchQuery(object): + """ + Handles the batching of queries + """ + + def __init__(self, consistency=Consistency.ONE, timestamp=None): + self.queries = [] + self.consistency = consistency + if timestamp is not None and not isinstance(timestamp, datetime): + raise CQLEngineException('timestamp object must be an instance of datetime') + self.timestamp = timestamp + + def add_query(self, query, params): + 
self.queries.append((query, params)) + + def execute(self): + query_list = [] + parameters = {} + + opener = 'BEGIN BATCH USING CONSISTENCY {}'.format(self.consistency) + if self.timestamp: + epoch = datetime(1970, 1, 1) + ts = long((self.timestamp - epoch).total_seconds() * 1000) + opener += ' TIMESTAMP {}'.format(ts) + + query_list = [opener] + for query, params in self.queries: + query_list.append(query) + parameters.update(params) + + query_list.append('APPLY BATCH;') + + with connection_manager() as con: + con.execute('\n'.join(query_list), parameters) + + self.queries = [] + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + #don't execute if there was an exception + if exc_type is not None: return + self.execute() + class QuerySet(object): def __init__(self, model): @@ -152,6 +207,8 @@ def __init__(self, model): self._result_cache = None self._result_idx = None + self._batch = None + def __unicode__(self): return self._select_query() @@ -242,6 +299,8 @@ def _select_query(self): #----Reads------ def _execute_query(self): + if self._batch: + raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") if self._result_cache is None: self._con = connection_manager() self._cur = self._con.execute(self._select_query(), self._where_values()) @@ -318,6 +377,18 @@ def _construct_instance(self, values): instance._is_persisted = True return instance + def batch(self, batch_obj): + """ + Adds a batch query to the mix + :param batch_obj: + :return: + """ + if not isinstance(batch_obj, BatchQuery): + raise CQLEngineException('batch_obj must be a BatchQuery instance') + clone = copy.deepcopy(self) + clone._batch = batch_obj + return clone + def first(self): try: return iter(self).next() @@ -379,8 +450,6 @@ def get(self, **kwargs): else: return self[0] - - def order_by(self, colname): """ orders the result set. 
@@ -416,6 +485,8 @@ def order_by(self, colname): def count(self): """ Returns the number of rows matched by this query """ + if self._batch: + raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") #TODO: check for previous query execution and return row count if it exists if self._result_cache is None: qs = ['SELECT COUNT(*)'] @@ -426,7 +497,7 @@ def count(self): qs += ['ALLOW FILTERING'] qs = ' '.join(qs) - + with connection_manager() as con: cur = con.execute(qs, self._where_values()) return cur.fetchone()[0] @@ -435,7 +506,7 @@ def count(self): def limit(self, v): """ - Sets the limit on the number of results returned + Sets the limit on the number of results returned CQL has a default limit of 10,000 """ if not (v is None or isinstance(v, (int, long))): @@ -488,19 +559,64 @@ def defer(self, fields): """ Don't load these fields for the returned query """ return self._only_or_defer('defer', fields) - #----writes---- - def save(self, instance): + def create(self, **kwargs): + return self.model(**kwargs).save(batch_obj=self._batch) + + #----delete--- + def delete(self, columns=[]): + """ + Deletes the contents of a query + """ + #validate where clause + partition_key = self.model._primary_keys.values()[0] + if not any([c.column == partition_key for c in self._where]): + raise QueryException("The partition key must be defined on delete queries") + qs = ['DELETE FROM {}'.format(self.column_family_name)] + qs += ['WHERE {}'.format(self._where_clause())] + qs = ' '.join(qs) + + if self._batch: + self._batch.add_query(qs, self._where_values()) + else: + with connection_manager() as con: + con.execute(qs, self._where_values()) + +class DMLQuery(object): + """ + A query object used for queries performing inserts, updates, or deletes + + this is usually instantiated by the model instance to be modified + + unlike the read query object, this is mutable + """ + + def __init__(self, model, instance=None): + self.model = model + 
self.column_family_name = self.model.column_family_name() + self.instance = instance + self.batch = None + pass + + def batch(self, batch_obj): + if not isinstance(batch_obj, BatchQuery): + raise CQLEngineException('batch_obj must be a BatchQuery instance') + self.batch = batch_obj + return self + + def save(self): """ Creates / updates a row. This is a blind insert call. - All validation and cleaning needs to happen + All validation and cleaning needs to happen prior to calling this. """ - assert type(instance) == self.model + if self.instance is None: + raise CQLEngineException("DML Query intance attribute is None") + assert type(self.instance) == self.model #organize data value_pairs = [] - values = instance.as_dict() + values = self.instance.as_dict() #get defined fields and their column names for name, col in self.model._columns.items(): @@ -510,10 +626,12 @@ def save(self, instance): #construct query string field_names = zip(*value_pairs)[0] + field_ids = {n:uuid1().hex for n in field_names} field_values = dict(value_pairs) + query_values = {field_ids[n]:field_values[n] for n in field_names} qs = [] - if instance._can_update(): + if self.instance._can_update(): qs += ["UPDATE {}".format(self.column_family_name)] qs += ["SET"] @@ -523,14 +641,14 @@ def save(self, instance): if not col.is_primary_key: val = values.get(name) if val is None: continue - set_statements += ['"{0}" = :{0}'.format(col.db_field_name)] + set_statements += ['"{}" = :{}'.format(col.db_field_name, field_ids[col.db_field_name])] qs += [', '.join(set_statements)] qs += ['WHERE'] where_statements = [] for name, col in self.model._primary_keys.items(): - where_statements += ['"{0}" = :{0}'.format(col.db_field_name)] + where_statements += ['"{}" = :{}'.format(col.db_field_name, field_ids[col.db_field_name])] qs += [' AND '.join(where_statements)] @@ -541,18 +659,21 @@ def save(self, instance): qs += ["INSERT INTO {}".format(self.column_family_name)] qs += ["({})".format(', 
'.join(['"{}"'.format(f) for f in field_names]))] qs += ['VALUES'] - qs += ["({})".format(', '.join([':'+f for f in field_names]))] + qs += ["({})".format(', '.join([':'+field_ids[f] for f in field_names]))] qs = ' '.join(qs) # skip query execution if it's empty # caused by pointless update queries if qs: - with connection_manager() as con: - con.execute(qs, field_values) + if self.batch: + self.batch.add_query(qs, query_values) + else: + with connection_manager() as con: + con.execute(qs, query_values) #delete deleted / nulled columns - deleted = [k for k,v in instance._values.items() if v.deleted] + deleted = [k for k,v in self.instance._values.items() if v.deleted] if deleted: del_fields = [self.model._columns[f] for f in deleted] del_fields = [f.db_field_name for f in del_fields if not f.primary_key] @@ -560,44 +681,36 @@ def save(self, instance): qs = ['DELETE {}'.format(', '.join(['"{}"'.format(f) for f in del_fields]))] qs += ['FROM {}'.format(self.column_family_name)] qs += ['WHERE'] - eq = lambda col: '{0} = :{0}'.format(col.db_field_name) + eq = lambda col: '"{}" = :{}'.format(col.db_field_name, field_ids[col.db_field_name]) qs += [' AND '.join([eq(f) for f in pks.values()])] qs = ' '.join(qs) - pk_dict = dict([(v.db_field_name, getattr(instance, k)) for k,v in pks.items()]) - - with connection_manager() as con: - con.execute(qs, pk_dict) - - - def create(self, **kwargs): - return self.model(**kwargs).save() - - #----delete--- - def delete(self, columns=[]): - """ - Deletes the contents of a query - """ - #validate where clause - partition_key = self.model._primary_keys.values()[0] - if not any([c.column == partition_key for c in self._where]): - raise QueryException("The partition key must be defined on delete queries") - qs = ['DELETE FROM {}'.format(self.column_family_name)] - qs += ['WHERE {}'.format(self._where_clause())] - qs = ' '.join(qs) - - with connection_manager() as con: - con.execute(qs, self._where_values()) - + if self.batch: + 
self.batch.add_query(qs, query_values) + else: + with connection_manager() as con: + con.execute(qs, query_values) - def delete_instance(self, instance): + def delete(self): """ Deletes one instance """ - pk_name = self.model._pk_name + if self.instance is None: + raise CQLEngineException("DML Query intance attribute is None") + field_values = {} qs = ['DELETE FROM {}'.format(self.column_family_name)] - qs += ['WHERE {0}=:{0}'.format(pk_name)] + qs += ['WHERE'] + where_statements = [] + for name, col in self.model._primary_keys.items(): + field_id = uuid1().hex + field_values[field_id] = col.to_database(getattr(self.instance, name)) + where_statements += ['"{}" = :{}'.format(col.db_field_name, field_id)] + + qs += [' AND '.join(where_statements)] qs = ' '.join(qs) - with connection_manager() as con: - con.execute(qs, {pk_name:instance.pk}) + if self.batch: + self.batch.add_query(qs, field_values) + else: + with connection_manager() as con: + con.execute(qs, field_values) diff --git a/cqlengine/tests/test_batch_query.py b/cqlengine/tests/test_batch_query.py new file mode 100644 index 0000000000..e69de29bb2 From 241c942334a049cb9d6d3ceac700ea8afad6e4f4 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 9 Mar 2013 17:43:10 -0800 Subject: [PATCH 0125/3726] restructuring some of the batch query internals, putting some unit tests around batch querying --- cqlengine/models.py | 42 +++++++++---- cqlengine/query.py | 18 +++--- cqlengine/tests/test_batch_query.py | 93 +++++++++++++++++++++++++++++ 3 files changed, 133 insertions(+), 20 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index ca9ee77a30..da4e18fc13 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -8,6 +8,22 @@ class ModelDefinitionException(ModelException): pass +class hybrid_classmethod(object): + """ + Allows a method to behave as both a class method and + normal instance method depending on how it's called + """ + + def __init__(self, clsmethod, instmethod): + 
self.clsmethod = clsmethod + self.instmethod = instmethod + + def __get__(self, instance, owner): + if instance is None: + return self.clsmethod.__get__(owner, owner) + else: + return self.instmethod.__get__(instance, owner) + class BaseModel(object): """ The base model class, don't inherit from this, inherit from Model, defined below @@ -35,6 +51,7 @@ def __init__(self, **values): # a flag set by the deserializer to indicate # that update should be used when persisting changes self._is_persisted = False + self._batch = None def _can_update(self): """ @@ -112,13 +129,10 @@ def filter(cls, **kwargs): def get(cls, **kwargs): return cls.objects.get(**kwargs) - def save(self, batch_obj=None): + def save(self): is_new = self.pk is None self.validate() - if batch_obj: - DMLQuery(self.__class__, self).batch(batch_obj).save() - else: - DMLQuery(self.__class__, self).save() + DMLQuery(self.__class__, self, batch=self._batch).save() #reset the value managers for v in self._values.values(): @@ -130,13 +144,19 @@ def save(self, batch_obj=None): def delete(self): """ Deletes this instance """ - DMLQuery(self.__class__, self).delete() + DMLQuery(self.__class__, self, batch=self._batch).delete() + + @classmethod + def _class_batch(cls, batch): + return cls.objects.batch(batch) + + def _inst_batch(self, batch): + self._batch = batch + return self + + batch = hybrid_classmethod(_class_batch, _inst_batch) + - def batch(self, batch_obj): - """ - Returns a batched DML query - """ - return DMLQuery(self.__class__, self).batch(batch_obj) class ModelMetaClass(type): diff --git a/cqlengine/query.py b/cqlengine/query.py index 3405f0518e..0b60539981 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -138,7 +138,7 @@ class BatchQuery(object): Handles the batching of queries """ - def __init__(self, consistency=Consistency.ONE, timestamp=None): + def __init__(self, consistency=None, timestamp=None): self.queries = [] self.consistency = consistency if timestamp is not None and not 
isinstance(timestamp, datetime): @@ -149,18 +149,18 @@ def add_query(self, query, params): self.queries.append((query, params)) def execute(self): - query_list = [] - parameters = {} - - opener = 'BEGIN BATCH USING CONSISTENCY {}'.format(self.consistency) + opener = 'BEGIN BATCH' + if self.consistency: + opener += ' USING CONSISTENCY {}'.format(self.consistency) if self.timestamp: epoch = datetime(1970, 1, 1) ts = long((self.timestamp - epoch).total_seconds() * 1000) opener += ' TIMESTAMP {}'.format(ts) query_list = [opener] + parameters = {} for query, params in self.queries: - query_list.append(query) + query_list.append(' ' + query) parameters.update(params) query_list.append('APPLY BATCH;') @@ -560,7 +560,7 @@ def defer(self, fields): return self._only_or_defer('defer', fields) def create(self, **kwargs): - return self.model(**kwargs).save(batch_obj=self._batch) + return self.model(**kwargs).batch(self._batch).save() #----delete--- def delete(self, columns=[]): @@ -590,11 +590,11 @@ class DMLQuery(object): unlike the read query object, this is mutable """ - def __init__(self, model, instance=None): + def __init__(self, model, instance=None, batch=None): self.model = model self.column_family_name = self.model.column_family_name() self.instance = instance - self.batch = None + self.batch = batch pass def batch(self, batch_obj): diff --git a/cqlengine/tests/test_batch_query.py b/cqlengine/tests/test_batch_query.py index e69de29bb2..c6d509ebad 100644 --- a/cqlengine/tests/test_batch_query.py +++ b/cqlengine/tests/test_batch_query.py @@ -0,0 +1,93 @@ +from unittest import skip +from uuid import uuid4 +import random +from cqlengine import Model, columns +from cqlengine.management import delete_table, create_table +from cqlengine.query import BatchQuery, Consistency +from cqlengine.tests.base import BaseCassEngTestCase + +class TestMultiKeyModel(Model): + partition = columns.Integer(primary_key=True) + cluster = columns.Integer(primary_key=True) + count = 
columns.Integer(required=False) + text = columns.Text(required=False) + +class BatchQueryTests(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(BatchQueryTests, cls).setUpClass() + delete_table(TestMultiKeyModel) + create_table(TestMultiKeyModel) + + @classmethod + def tearDownClass(cls): + super(BatchQueryTests, cls).tearDownClass() + delete_table(TestMultiKeyModel) + + def setUp(self): + super(BatchQueryTests, self).setUp() + self.pkey = 1 + for obj in TestMultiKeyModel.filter(partition=self.pkey): + obj.delete() + + + + def test_insert_success_case(self): + + b = BatchQuery() + inst = TestMultiKeyModel.batch(b).create(partition=self.pkey, cluster=2, count=3, text='4') + + with self.assertRaises(TestMultiKeyModel.DoesNotExist): + TestMultiKeyModel.get(partition=self.pkey, cluster=2) + + b.execute() + + TestMultiKeyModel.get(partition=self.pkey, cluster=2) + + def test_update_success_case(self): + + inst = TestMultiKeyModel.create(partition=self.pkey, cluster=2, count=3, text='4') + + b = BatchQuery() + + inst.count = 4 + inst.batch(b).save() + + inst2 = TestMultiKeyModel.get(partition=self.pkey, cluster=2) + assert inst2.count == 3 + + b.execute() + + inst3 = TestMultiKeyModel.get(partition=self.pkey, cluster=2) + assert inst3.count == 4 + + def test_delete_success_case(self): + + inst = TestMultiKeyModel.create(partition=self.pkey, cluster=2, count=3, text='4') + + b = BatchQuery() + + inst.batch(b).delete() + + TestMultiKeyModel.get(partition=self.pkey, cluster=2) + + b.execute() + + with self.assertRaises(TestMultiKeyModel.DoesNotExist): + TestMultiKeyModel.get(partition=self.pkey, cluster=2) + + def test_context_manager(self): + + with BatchQuery() as b: + for i in range(5): + TestMultiKeyModel.batch(b).create(partition=self.pkey, cluster=i, count=3, text='4') + + for i in range(5): + with self.assertRaises(TestMultiKeyModel.DoesNotExist): + TestMultiKeyModel.get(partition=self.pkey, cluster=i) + + for i in range(5): + 
TestMultiKeyModel.get(partition=self.pkey, cluster=i) + + From 66ca79f7779b6ade94e2ee89641fd934eaeee8ed Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 9 Mar 2013 18:18:20 -0800 Subject: [PATCH 0126/3726] adding documentation for batch queries --- cqlengine/__init__.py | 1 + docs/topics/queryset.rst | 28 ++++++++++++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index d5e7b74fcc..184a4c8a38 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,6 +1,7 @@ from cqlengine.columns import * from cqlengine.functions import * from cqlengine.models import Model +from cqlengine.query import BatchQuery __version__ = '0.1.2' diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index ce3a2c818c..6733080cfe 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -213,6 +213,34 @@ Ordering QuerySets *For instance, given our Automobile model, year is the only column we can order on.* +Batch Queries +=============== + + cqlengine now supports batch queries using the BatchQuery class. Batch queries can be started and stopped manually, or within a context manager. To add queries to the batch object, you just need to precede the create/save/delete call with a call to batch, and pass in the batch object. + + You can only create, update, and delete rows with a batch query, attempting to read rows out of the database with a batch query will fail. + + .. 
code-block:: python + + from cqlengine import BatchQuery + + #using a context manager + with BatchQuery() as b: + now = datetime.now() + em1 = ExampleModel.batch(b).create(example_type=0, description="1", created_at=now) + em2 = ExampleModel.batch(b).create(example_type=0, description="2", created_at=now) + em3 = ExampleModel.batch(b).create(example_type=0, description="3", created_at=now) + + # -- or -- + + #manually + b = BatchQuery() + now = datetime.now() + em1 = ExampleModel.batch(b).create(example_type=0, description="1", created_at=now) + em2 = ExampleModel.batch(b).create(example_type=0, description="2", created_at=now) + em3 = ExampleModel.batch(b).create(example_type=0, description="3", created_at=now) + b.execute() + QuerySet method reference ========================= From af50ced2df9ddd3d0dbcfc41910d79080a65dff0 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 10 Mar 2013 14:11:16 -0700 Subject: [PATCH 0127/3726] adding support for partial updates for set columns --- cqlengine/columns.py | 68 ++++++++++++++++++- cqlengine/models.py | 3 +- cqlengine/query.py | 11 ++- .../tests/columns/test_container_columns.py | 26 +++++++ 4 files changed, 102 insertions(+), 6 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 67d9212af2..84b245da0c 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -29,6 +29,9 @@ def changed(self): """ return self.value != self.previous_value + def reset_previous_value(self): + self.previous_value = copy(self.value) + def getval(self): return self.value @@ -283,9 +286,6 @@ def __init__(self, **kwargs): super(Counter, self).__init__(**kwargs) raise NotImplementedError -class ContainerValueManager(BaseValueManager): - pass - class ContainerQuoter(object): """ contains a single value, which will quote itself for CQL insertion statements @@ -323,6 +323,12 @@ def get_column_def(self): db_type = self.db_type.format(self.value_type.db_type) return '{} {}'.format(self.db_field_name, db_type) + def 
get_update_statement(self, val, prev, ctx): + """ + Used to add partial update statements + """ + raise NotImplementedError + class Set(BaseContainerColumn): """ Stores a set of unordered, unique values @@ -359,8 +365,50 @@ def validate(self, value): return {self.value_col.validate(v) for v in val} def to_database(self, value): + if value is None: return None return self.Quoter({self.value_col.to_database(v) for v in value}) + def get_update_statement(self, val, prev, ctx): + """ + Returns statements that will be added to an object's update statement + also updates the query context + + :param val: the current column value + :param prev: the previous column value + :param ctx: the values that will be passed to the query + :rtype: list + """ + + # remove from Quoter containers, if applicable + if isinstance(val, self.Quoter): val = val.value + if isinstance(prev, self.Quoter): prev = prev.value + + if val is None or val == prev: + # don't return anything if the new value is the same as + # the old one, or if the new value is none + return [] + elif prev is None or not any({v in prev for v in val}): + field = uuid1().hex + ctx[field] = self.Quoter(val) + return ['"{}" = :{}'.format(self.db_field_name, field)] + else: + # partial update time + to_create = val - prev + to_delete = prev - val + statements = [] + + if to_create: + field_id = uuid1().hex + ctx[field_id] = self.Quoter(to_create) + statements += ['"{0}" = "{0}" + :{1}'.format(self.db_field_name, field_id)] + + if to_delete: + field_id = uuid1().hex + ctx[field_id] = self.Quoter(to_delete) + statements += ['"{0}" = "{0}" - :{1}'.format(self.db_field_name, field_id)] + + return statements + class List(BaseContainerColumn): """ Stores a list of ordered values @@ -383,8 +431,15 @@ def validate(self, value): return [self.value_col.validate(v) for v in val] def to_database(self, value): + if value is None: return None return self.Quoter([self.value_col.to_database(v) for v in value]) + def 
get_update_statement(self, val, prev, values): + """ + http://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string_search_algorithm + """ + pass + class Map(BaseContainerColumn): """ Stores a key -> value map (dictionary) @@ -438,6 +493,13 @@ def to_python(self, value): return {self.key_col.to_python(k):self.value_col.to_python(v) for k,v in value.items()} def to_database(self, value): + if value is None: return None return self.Quoter({self.key_col.to_database(k):self.value_col.to_database(v) for k,v in value.items()}) + def get_update_statement(self, val, prev, ctx): + """ + http://www.datastax.com/docs/1.2/cql_cli/using/collections_map#deletion + """ + pass + diff --git a/cqlengine/models.py b/cqlengine/models.py index da4e18fc13..9b6ec3b084 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -136,8 +136,7 @@ def save(self): #reset the value managers for v in self._values.values(): - v.previous_value = v.value - + v.reset_previous_value() self._is_persisted = True return self diff --git a/cqlengine/query.py b/cqlengine/query.py index 0b60539981..fd91e45add 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -4,6 +4,7 @@ from hashlib import md5 from time import time from uuid import uuid1 +from cqlengine import BaseContainerColumn from cqlengine.connection import connection_manager from cqlengine.exceptions import CQLEngineException @@ -641,7 +642,15 @@ def save(self): if not col.is_primary_key: val = values.get(name) if val is None: continue - set_statements += ['"{}" = :{}'.format(col.db_field_name, field_ids[col.db_field_name])] + if isinstance(col, BaseContainerColumn): + #remove value from query values, the column will handle it + query_values.pop(field_ids.get(name), None) + + val_mgr = self.instance._values[name] + set_statements += col.get_update_statement(val, val_mgr.previous_value, query_values) + pass + else: + set_statements += ['"{}" = :{}'.format(col.db_field_name, field_ids[col.db_field_name])] qs += [', '.join(set_statements)] qs += 
['WHERE'] diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 4a1721e078..15f8919288 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -45,6 +45,32 @@ def test_type_validation(self): with self.assertRaises(ValidationError): TestSetModel.create(int_set={'string', True}, text_set={1, 3.0}) + def test_partial_updates(self): + """ Tests that partial udpates work as expected """ + m1 = TestSetModel.create(int_set={1,2,3,4}) + + m1.int_set.add(5) + m1.int_set.remove(1) + assert m1.int_set == {2,3,4,5} + + m1.save() + + m2 = TestSetModel.get(partition=m1.partition) + assert m2.int_set == {2,3,4,5} + + def test_partial_update_creation(self): + """ + Tests that proper update statements are created for a partial set update + :return: + """ + ctx = {} + col = columns.Set(columns.Integer, db_field="TEST") + statements = col.get_update_statement({1,2,3,4}, {2,3,4,5}, ctx) + + assert len([v for v in ctx.values() if {1} == v.value]) == 1 + assert len([v for v in ctx.values() if {5} == v.value]) == 1 + assert len([s for s in statements if '"TEST" = "TEST" -' in s]) == 1 + assert len([s for s in statements if '"TEST" = "TEST" +' in s]) == 1 class TestListModel(Model): partition = columns.UUID(primary_key=True, default=uuid4) From cdb1dc964287442dab13eb2300a2f4140e1d5ffb Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 10 Mar 2013 19:13:07 -0700 Subject: [PATCH 0128/3726] adding support for partial updates for list columns --- cqlengine/columns.py | 62 ++++++++++++++++++- .../tests/columns/test_container_columns.py | 28 +++++++++ 2 files changed, 88 insertions(+), 2 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 84b245da0c..6144d6c129 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -436,9 +436,67 @@ def to_database(self, value): def get_update_statement(self, val, prev, values): """ - 
http://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string_search_algorithm + Returns statements that will be added to an object's update statement + also updates the query context """ - pass + # remove from Quoter containers, if applicable + if isinstance(val, self.Quoter): val = val.value + if isinstance(prev, self.Quoter): prev = prev.value + + def _insert(): + field_id = uuid1().hex + values[field_id] = self.Quoter(val) + return ['"{}" = :{}'.format(self.db_field_name, field_id)] + + if val is None or val == prev: + return [] + elif prev is None: + return _insert() + elif len(val) < len(prev): + return _insert() + else: + # the prepend and append lists, + # if both of these are still None after looking + # at both lists, an insert statement will be returned + prepend = None + append = None + + # the max start idx we want to compare + search_space = len(val) - max(0, len(prev)-1) + + # the size of the sub lists we want to look at + search_size = len(prev) + + for i in range(search_space): + #slice boundary + j = i + search_size + sub = val[i:j] + idx_cmp = lambda idx: prev[idx] == sub[idx] + if idx_cmp(0) and idx_cmp(-1) and prev == sub: + prepend = val[:i] + append = val[j:] + break + + # create update statements + if prepend is append is None: + return _insert() + + statements = [] + if prepend: + field_id = uuid1().hex + # CQL seems to prepend element at a time, starting + # with the element at idx 0, we can either reverse + # it here, or have it inserted in reverse + prepend.reverse() + values[field_id] = self.Quoter(prepend) + statements += ['"{0}" = :{1} + "{0}"'.format(self.db_field_name, field_id)] + + if append: + field_id = uuid1().hex + values[field_id] = self.Quoter(append) + statements += ['"{0}" = "{0}" + :{1}'.format(self.db_field_name, field_id)] + + return statements class Map(BaseContainerColumn): """ diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 15f8919288..11a0af001e 
100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -114,6 +114,34 @@ def test_type_validation(self): with self.assertRaises(ValidationError): TestListModel.create(int_list=['string', True], text_list=[1, 3.0]) + def test_partial_updates(self): + """ Tests that partial udpates work as expected """ + final = range(10) + initial = final[3:7] + m1 = TestListModel.create(int_list=initial) + + m1.int_list = final + m1.save() + + m2 = TestListModel.get(partition=m1.partition) + assert list(m2.int_list) == final + + def test_partial_update_creation(self): + """ + Tests that proper update statements are created for a partial list update + :return: + """ + final = range(10) + initial = final[3:7] + + ctx = {} + col = columns.List(columns.Integer, db_field="TEST") + statements = col.get_update_statement(final, initial, ctx) + + assert len([v for v in ctx.values() if [0,1,2] == v.value]) == 1 + assert len([v for v in ctx.values() if [7,8,9] == v.value]) == 1 + assert len([s for s in statements if '"TEST" = "TEST" +' in s]) == 1 + assert len([s for s in statements if '+ "TEST"' in s]) == 1 class TestMapModel(Model): partition = columns.UUID(primary_key=True, default=uuid4) From a6332f12a0447458a97a49a5c12533311ea36efb Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 10 Mar 2013 19:53:22 -0700 Subject: [PATCH 0129/3726] adding support for partial updates for map columns --- cqlengine/columns.py | 51 ++++++++++++++++++- cqlengine/query.py | 35 +++++++++---- .../tests/columns/test_container_columns.py | 37 ++++++++++++++ 3 files changed, 112 insertions(+), 11 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 6144d6c129..fd5fc7c9ca 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -366,6 +366,7 @@ def validate(self, value): def to_database(self, value): if value is None: return None + if isinstance(value, self.Quoter): return value return 
self.Quoter({self.value_col.to_database(v) for v in value}) def get_update_statement(self, val, prev, ctx): @@ -380,6 +381,8 @@ def get_update_statement(self, val, prev, ctx): """ # remove from Quoter containers, if applicable + val = self.to_database(val) + prev = self.to_database(prev) if isinstance(val, self.Quoter): val = val.value if isinstance(prev, self.Quoter): prev = prev.value @@ -432,6 +435,7 @@ def validate(self, value): def to_database(self, value): if value is None: return None + if isinstance(value, self.Quoter): return value return self.Quoter([self.value_col.to_database(v) for v in value]) def get_update_statement(self, val, prev, values): @@ -440,6 +444,8 @@ def get_update_statement(self, val, prev, values): also updates the query context """ # remove from Quoter containers, if applicable + val = self.to_database(val) + prev = self.to_database(prev) if isinstance(val, self.Quoter): val = val.value if isinstance(prev, self.Quoter): prev = prev.value @@ -552,12 +558,55 @@ def to_python(self, value): def to_database(self, value): if value is None: return None + if isinstance(value, self.Quoter): return value return self.Quoter({self.key_col.to_database(k):self.value_col.to_database(v) for k,v in value.items()}) def get_update_statement(self, val, prev, ctx): """ http://www.datastax.com/docs/1.2/cql_cli/using/collections_map#deletion """ - pass + # remove from Quoter containers, if applicable + val = self.to_database(val) + prev = self.to_database(prev) + if isinstance(val, self.Quoter): val = val.value + if isinstance(prev, self.Quoter): prev = prev.value + + #get the updated map + update = {k:v for k,v in val.items() if v != prev.get(k)} + + statements = [] + for k,v in update.items(): + key_id = uuid1().hex + val_id = uuid1().hex + ctx[key_id] = k + ctx[val_id] = v + statements += ['"{}"[:{}] = :{}'.format(self.db_field_name, key_id, val_id)] + + return statements + + def get_delete_statement(self, val, prev, ctx): + """ + Returns statements that 
will be added to an object's delete statement + also updates the query context, used for removing keys from a map + """ + if val is prev is None: + return [] + + val = self.to_database(val) + prev = self.to_database(prev) + if isinstance(val, self.Quoter): val = val.value + if isinstance(prev, self.Quoter): prev = prev.value + + old_keys = set(prev.keys()) + new_keys = set(val.keys()) + del_keys = old_keys - new_keys + + del_statements = [] + for key in del_keys: + field_id = uuid1().hex + ctx[field_id] = key + del_statements += ['"{}"[:{}]'.format(self.db_field_name, field_id)] + + return del_statements diff --git a/cqlengine/query.py b/cqlengine/query.py index fd91e45add..29ab13962b 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -4,7 +4,7 @@ from hashlib import md5 from time import time from uuid import uuid1 -from cqlengine import BaseContainerColumn +from cqlengine import BaseContainerColumn, BaseValueManager, Map from cqlengine.connection import connection_manager from cqlengine.exceptions import CQLEngineException @@ -681,17 +681,32 @@ def save(self): with connection_manager() as con: con.execute(qs, query_values) - #delete deleted / nulled columns - deleted = [k for k,v in self.instance._values.items() if v.deleted] - if deleted: - del_fields = [self.model._columns[f] for f in deleted] - del_fields = [f.db_field_name for f in del_fields if not f.primary_key] - pks = self.model._primary_keys - qs = ['DELETE {}'.format(', '.join(['"{}"'.format(f) for f in del_fields]))] + + # delete nulled columns and removed map keys + qs = ['DELETE'] + query_values = {} + + del_statements = [] + for k,v in self.instance._values.items(): + col = v.column + if v.deleted: + del_statements += ['"{}"'.format(col.db_field_name)] + elif isinstance(col, Map): + del_statements += col.get_delete_statement(v.value, v.previous_value, query_values) + + if del_statements: + qs += [', '.join(del_statements)] + qs += ['FROM {}'.format(self.column_family_name)] + qs += ['WHERE'] 
- eq = lambda col: '"{}" = :{}'.format(col.db_field_name, field_ids[col.db_field_name]) - qs += [' AND '.join([eq(f) for f in pks.values()])] + where_statements = [] + for name, col in self.model._primary_keys.items(): + field_id = uuid1().hex + query_values[field_id] = field_values[name] + where_statements += ['"{}" = :{}'.format(col.db_field_name, field_id)] + qs += [' AND '.join(where_statements)] + qs = ' '.join(qs) if self.batch: diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 11a0af001e..28dca78366 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -189,3 +189,40 @@ def test_type_validation(self): """ with self.assertRaises(ValidationError): TestMapModel.create(int_map={'key':2,uuid4():'val'}, text_map={2:5}) + + def test_partial_updates(self): + """ Tests that partial udpates work as expected """ + now = datetime.now() + #derez it a bit + now = datetime(*now.timetuple()[:-3]) + early = now - timedelta(minutes=30) + earlier = early - timedelta(minutes=30) + later = now + timedelta(minutes=30) + + initial = {'now':now, 'early':earlier} + final = {'later':later, 'early':early} + + m1 = TestMapModel.create(text_map=initial) + + m1.text_map = final + m1.save() + + m2 = TestMapModel.get(partition=m1.partition) + assert m2.text_map == final + +# def test_partial_update_creation(self): +# """ +# Tests that proper update statements are created for a partial list update +# :return: +# """ +# final = range(10) +# initial = final[3:7] +# +# ctx = {} +# col = columns.List(columns.Integer, db_field="TEST") +# statements = col.get_update_statement(final, initial, ctx) +# +# assert len([v for v in ctx.values() if [0,1,2] == v.value]) == 1 +# assert len([v for v in ctx.values() if [7,8,9] == v.value]) == 1 +# assert len([s for s in statements if '"TEST" = "TEST" +' in s]) == 1 +# assert len([s for s in statements if '+ "TEST"' in s]) 
== 1 From bd7c5c080a648e3e9d2e500a208a6aa432159765 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 10 Mar 2013 20:00:39 -0700 Subject: [PATCH 0130/3726] fixing failing unit test --- cqlengine/tests/columns/test_container_columns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 28dca78366..914c7479a8 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -138,7 +138,7 @@ def test_partial_update_creation(self): col = columns.List(columns.Integer, db_field="TEST") statements = col.get_update_statement(final, initial, ctx) - assert len([v for v in ctx.values() if [0,1,2] == v.value]) == 1 + assert len([v for v in ctx.values() if [2,1,0] == v.value]) == 1 assert len([v for v in ctx.values() if [7,8,9] == v.value]) == 1 assert len([s for s in statements if '"TEST" = "TEST" +' in s]) == 1 assert len([s for s in statements if '+ "TEST"' in s]) == 1 From 7f1d0c9e80e692c3244ebfd61a00b1bce0e6a846 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 11 Mar 2013 18:59:27 -0700 Subject: [PATCH 0131/3726] adding user configurable keyspace --- cqlengine/connection.py | 8 ++++++-- cqlengine/management.py | 9 +++++---- cqlengine/models.py | 11 +++++++++-- cqlengine/query.py | 5 ++++- cqlengine/tests/base.py | 2 +- 5 files changed, 25 insertions(+), 10 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 52bc5cdef1..9be7922579 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -23,7 +23,7 @@ class CQLConnectionError(CQLEngineException): pass _password = None _max_connections = 10 -def setup(hosts, username=None, password=None, max_connections=10): +def setup(hosts, username=None, password=None, max_connections=10, default_keyspace=None): """ Records the hosts and connects to one of them @@ -36,7 +36,11 @@ def setup(hosts, 
username=None, password=None, max_connections=10): _username = username _password = password _max_connections = max_connections - + + if default_keyspace: + from cqlengine import models + models.DEFAULT_KEYSPACE = default_keyspace + for host in hosts: host = host.strip() host = host.split(':') diff --git a/cqlengine/management.py b/cqlengine/management.py index ba8b8145f7..5ef536cf7d 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -14,7 +14,8 @@ def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, :param **replication_values: 1.2 only, additional values to ad to the replication data map """ with connection_manager() as con: - if name not in [k.name for k in con.con.client.describe_keyspaces()]: + if not any([name == k.name for k in con.con.client.describe_keyspaces()]): +# if name not in [k.name for k in con.con.client.describe_keyspaces()]: try: #Try the 1.1 method con.execute("""CREATE KEYSPACE {} @@ -50,11 +51,11 @@ def create_table(model, create_missing_keyspace=True): #create missing keyspace if create_missing_keyspace: - create_keyspace(model.keyspace) + create_keyspace(model._get_keyspace()) with connection_manager() as con: #check for an existing column family - ks_info = con.con.client.describe_keyspace(model.keyspace) + ks_info = con.con.client.describe_keyspace(model._get_keyspace()) if not any([raw_cf_name == cf.name for cf in ks_info.cf_defs]): qs = ['CREATE TABLE {}'.format(cf_name)] @@ -85,7 +86,7 @@ def add_column(col): raise #get existing index names, skip ones that already exist - ks_info = con.con.client.describe_keyspace(model.keyspace) + ks_info = con.con.client.describe_keyspace(model._get_keyspace()) cf_defs = [cf for cf in ks_info.cf_defs if cf.name == raw_cf_name] idx_names = [i.index_name for i in cf_defs[0].column_metadata] if cf_defs else [] idx_names = filter(None, idx_names) diff --git a/cqlengine/models.py b/cqlengine/models.py index 9b6ec3b084..b9801bf0bf 100644 --- 
a/cqlengine/models.py +++ b/cqlengine/models.py @@ -8,6 +8,8 @@ class ModelDefinitionException(ModelException): pass +DEFAULT_KEYSPACE = 'cqlengine' + class hybrid_classmethod(object): """ Allows a method to behave as both a class method and @@ -37,7 +39,7 @@ class MultipleObjectsReturned(QueryException): pass table_name = None #the keyspace for this model - keyspace = 'cqlengine' + keyspace = None read_repair_chance = 0.1 def __init__(self, **values): @@ -64,6 +66,11 @@ def _can_update(self): pks = self._primary_keys.keys() return all([not self._values[k].changed for k in self._primary_keys]) + @classmethod + def _get_keyspace(cls): + """ Returns the manual keyspace, if set, otherwise the default keyspace """ + return cls.keyspace or DEFAULT_KEYSPACE + def __eq__(self, other): return self.as_dict() == other.as_dict() @@ -93,7 +100,7 @@ def column_family_name(cls, include_keyspace=True): cf_name = cf_name.lower() cf_name = re.sub(r'^_+', '', cf_name) if not include_keyspace: return cf_name - return '{}.{}'.format(cls.keyspace, cf_name) + return '{}.{}'.format(cls._get_keyspace(), cf_name) @property def pk(self): diff --git a/cqlengine/query.py b/cqlengine/query.py index 29ab13962b..36909f1e44 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -184,7 +184,6 @@ class QuerySet(object): def __init__(self, model): super(QuerySet, self).__init__() self.model = model - self.column_family_name = self.model.column_family_name() #Where clause filters self._where = [] @@ -210,6 +209,10 @@ def __init__(self, model): self._batch = None + @property + def column_family_name(self): + return self.model.column_family_name() + def __unicode__(self): return self._select_query() diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index d94ea69b04..0b450cdd8b 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -7,7 +7,7 @@ class BaseCassEngTestCase(TestCase): def setUpClass(cls): super(BaseCassEngTestCase, cls).setUpClass() if not connection._hosts: 
- connection.setup(['localhost']) + connection.setup(['localhost'], default_keyspace='cqlengine_test') def assertHasAttr(self, obj, attr): self.assertTrue(hasattr(obj, attr), From f04c831ec0b69f4eb44bf6f08b1592dcc4c86b1b Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 12 Mar 2013 10:11:55 -0700 Subject: [PATCH 0132/3726] fixing bug caused by batch objects on querysets being copied --- cqlengine/query.py | 6 ++++++ cqlengine/tests/test_batch_query.py | 16 ++++++++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 36909f1e44..c148f8e44e 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -227,6 +227,12 @@ def __deepcopy__(self, memo): for k,v in self.__dict__.items(): if k in ['_con', '_cur', '_result_cache', '_result_idx']: clone.__dict__[k] = None + elif k == '_batch': + # we need to keep the same batch instance across + # all queryset clones, otherwise the batched queries + # fly off into other batch instances which are never + # executed, thx @dokai + clone.__dict__[k] = self._batch else: clone.__dict__[k] = copy.deepcopy(v, memo) diff --git a/cqlengine/tests/test_batch_query.py b/cqlengine/tests/test_batch_query.py index c6d509ebad..fb4aab4a72 100644 --- a/cqlengine/tests/test_batch_query.py +++ b/cqlengine/tests/test_batch_query.py @@ -31,8 +31,6 @@ def setUp(self): for obj in TestMultiKeyModel.filter(partition=self.pkey): obj.delete() - - def test_insert_success_case(self): b = BatchQuery() @@ -90,4 +88,18 @@ def test_context_manager(self): for i in range(5): TestMultiKeyModel.get(partition=self.pkey, cluster=i) + def test_bulk_delete_success_case(self): + + for i in range(1): + for j in range(5): + TestMultiKeyModel.create(partition=i, cluster=j, count=i*j, text='{}:{}'.format(i,j)) + + with BatchQuery() as b: + TestMultiKeyModel.objects.batch(b).filter(partition=0).delete() + assert TestMultiKeyModel.filter(partition=0).count() == 5 + + assert 
TestMultiKeyModel.filter(partition=0).count() == 0 + #cleanup + for m in TestMultiKeyModel.all(): + m.delete() From 1dd34e0029068a1eb9dcff75f6ac639e69c5b2ee Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 12 Mar 2013 21:57:23 -0700 Subject: [PATCH 0133/3726] updating batch query to work with 1.2 --- cqlengine/query.py | 22 +++++++++------------- cqlengine/tests/test_batch_query.py | 2 +- 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index c148f8e44e..2355edeadf 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -126,22 +126,20 @@ class LessThanOrEqualOperator(QueryOperator): symbol = "LTE" cql_symbol = '<=' -class Consistency(object): - ANY = 'ANY' - ONE = 'ONE' - QUORUM = 'QUORUM' - LOCAL_QUORUM = 'LOCAL_QUORUM' - EACH_QUORUM = 'EACH_QUORUM' - ALL = 'ALL' +class BatchType(object): + Unlogged = 'UNLOGGED' + Counter = 'COUNTER' class BatchQuery(object): """ Handles the batching of queries + + http://www.datastax.com/docs/1.2/cql_cli/cql/BATCH """ - def __init__(self, consistency=None, timestamp=None): + def __init__(self, batch_type=None, timestamp=None): self.queries = [] - self.consistency = consistency + self.batch_type = batch_type if timestamp is not None and not isinstance(timestamp, datetime): raise CQLEngineException('timestamp object must be an instance of datetime') self.timestamp = timestamp @@ -150,13 +148,11 @@ def add_query(self, query, params): self.queries.append((query, params)) def execute(self): - opener = 'BEGIN BATCH' - if self.consistency: - opener += ' USING CONSISTENCY {}'.format(self.consistency) + opener = 'BEGIN ' + (self.batch_type + ' ' if self.batch_type else '') + ' BATCH' if self.timestamp: epoch = datetime(1970, 1, 1) ts = long((self.timestamp - epoch).total_seconds() * 1000) - opener += ' TIMESTAMP {}'.format(ts) + opener += ' USING TIMESTAMP {}'.format(ts) query_list = [opener] parameters = {} diff --git a/cqlengine/tests/test_batch_query.py 
b/cqlengine/tests/test_batch_query.py index fb4aab4a72..6ba608778c 100644 --- a/cqlengine/tests/test_batch_query.py +++ b/cqlengine/tests/test_batch_query.py @@ -3,7 +3,7 @@ import random from cqlengine import Model, columns from cqlengine.management import delete_table, create_table -from cqlengine.query import BatchQuery, Consistency +from cqlengine.query import BatchQuery from cqlengine.tests.base import BaseCassEngTestCase class TestMultiKeyModel(Model): From fb99a4d70e63f6a2541f54175b89b20b0b9ad865 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 12 Mar 2013 21:57:23 -0700 Subject: [PATCH 0134/3726] updating batch query to work with 1.2 --- cqlengine/query.py | 22 ++--- cqlengine/tests/query/test_batch_query.py | 106 ++++++++++++++++++++++ cqlengine/tests/test_batch_query.py | 2 +- 3 files changed, 116 insertions(+), 14 deletions(-) create mode 100644 cqlengine/tests/query/test_batch_query.py diff --git a/cqlengine/query.py b/cqlengine/query.py index c148f8e44e..2355edeadf 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -126,22 +126,20 @@ class LessThanOrEqualOperator(QueryOperator): symbol = "LTE" cql_symbol = '<=' -class Consistency(object): - ANY = 'ANY' - ONE = 'ONE' - QUORUM = 'QUORUM' - LOCAL_QUORUM = 'LOCAL_QUORUM' - EACH_QUORUM = 'EACH_QUORUM' - ALL = 'ALL' +class BatchType(object): + Unlogged = 'UNLOGGED' + Counter = 'COUNTER' class BatchQuery(object): """ Handles the batching of queries + + http://www.datastax.com/docs/1.2/cql_cli/cql/BATCH """ - def __init__(self, consistency=None, timestamp=None): + def __init__(self, batch_type=None, timestamp=None): self.queries = [] - self.consistency = consistency + self.batch_type = batch_type if timestamp is not None and not isinstance(timestamp, datetime): raise CQLEngineException('timestamp object must be an instance of datetime') self.timestamp = timestamp @@ -150,13 +148,11 @@ def add_query(self, query, params): self.queries.append((query, params)) def execute(self): - opener = 'BEGIN 
BATCH' - if self.consistency: - opener += ' USING CONSISTENCY {}'.format(self.consistency) + opener = 'BEGIN ' + (self.batch_type + ' ' if self.batch_type else '') + ' BATCH' if self.timestamp: epoch = datetime(1970, 1, 1) ts = long((self.timestamp - epoch).total_seconds() * 1000) - opener += ' TIMESTAMP {}'.format(ts) + opener += ' USING TIMESTAMP {}'.format(ts) query_list = [opener] parameters = {} diff --git a/cqlengine/tests/query/test_batch_query.py b/cqlengine/tests/query/test_batch_query.py new file mode 100644 index 0000000000..5b31826a39 --- /dev/null +++ b/cqlengine/tests/query/test_batch_query.py @@ -0,0 +1,106 @@ +from datetime import datetime +from unittest import skip +from uuid import uuid4 +import random +from cqlengine import Model, columns +from cqlengine.management import delete_table, create_table +from cqlengine.query import BatchQuery +from cqlengine.tests.base import BaseCassEngTestCase + +class TestMultiKeyModel(Model): + partition = columns.Integer(primary_key=True) + cluster = columns.Integer(primary_key=True) + count = columns.Integer(required=False) + text = columns.Text(required=False) + +class BatchQueryTests(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(BatchQueryTests, cls).setUpClass() + delete_table(TestMultiKeyModel) + create_table(TestMultiKeyModel) + + @classmethod + def tearDownClass(cls): + super(BatchQueryTests, cls).tearDownClass() + delete_table(TestMultiKeyModel) + + def setUp(self): + super(BatchQueryTests, self).setUp() + self.pkey = 1 + for obj in TestMultiKeyModel.filter(partition=self.pkey): + obj.delete() + + def test_insert_success_case(self): + + b = BatchQuery() + inst = TestMultiKeyModel.batch(b).create(partition=self.pkey, cluster=2, count=3, text='4') + + with self.assertRaises(TestMultiKeyModel.DoesNotExist): + TestMultiKeyModel.get(partition=self.pkey, cluster=2) + + b.execute() + + TestMultiKeyModel.get(partition=self.pkey, cluster=2) + + def test_update_success_case(self): + + inst = 
TestMultiKeyModel.create(partition=self.pkey, cluster=2, count=3, text='4') + + b = BatchQuery() + + inst.count = 4 + inst.batch(b).save() + + inst2 = TestMultiKeyModel.get(partition=self.pkey, cluster=2) + assert inst2.count == 3 + + b.execute() + + inst3 = TestMultiKeyModel.get(partition=self.pkey, cluster=2) + assert inst3.count == 4 + + def test_delete_success_case(self): + + inst = TestMultiKeyModel.create(partition=self.pkey, cluster=2, count=3, text='4') + + b = BatchQuery() + + inst.batch(b).delete() + + TestMultiKeyModel.get(partition=self.pkey, cluster=2) + + b.execute() + + with self.assertRaises(TestMultiKeyModel.DoesNotExist): + TestMultiKeyModel.get(partition=self.pkey, cluster=2) + + def test_context_manager(self): + + with BatchQuery() as b: + for i in range(5): + TestMultiKeyModel.batch(b).create(partition=self.pkey, cluster=i, count=3, text='4') + + for i in range(5): + with self.assertRaises(TestMultiKeyModel.DoesNotExist): + TestMultiKeyModel.get(partition=self.pkey, cluster=i) + + for i in range(5): + TestMultiKeyModel.get(partition=self.pkey, cluster=i) + + def test_bulk_delete_success_case(self): + + for i in range(1): + for j in range(5): + TestMultiKeyModel.create(partition=i, cluster=j, count=i*j, text='{}:{}'.format(i,j)) + + with BatchQuery() as b: + TestMultiKeyModel.objects.batch(b).filter(partition=0).delete() + assert TestMultiKeyModel.filter(partition=0).count() == 5 + + assert TestMultiKeyModel.filter(partition=0).count() == 0 + #cleanup + for m in TestMultiKeyModel.all(): + m.delete() + diff --git a/cqlengine/tests/test_batch_query.py b/cqlengine/tests/test_batch_query.py index fb4aab4a72..6ba608778c 100644 --- a/cqlengine/tests/test_batch_query.py +++ b/cqlengine/tests/test_batch_query.py @@ -3,7 +3,7 @@ import random from cqlengine import Model, columns from cqlengine.management import delete_table, create_table -from cqlengine.query import BatchQuery, Consistency +from cqlengine.query import BatchQuery from cqlengine.tests.base 
import BaseCassEngTestCase class TestMultiKeyModel(Model): From 827bf18d6b87640ca37ae60ffd4b636d42a79b24 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mariusz=20Kry=C5=84ski?= Date: Wed, 13 Mar 2013 23:15:31 +0100 Subject: [PATCH 0135/3726] include cf name in auto generated index name --- cqlengine/management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 5ef536cf7d..67c34f6841 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -95,7 +95,7 @@ def add_column(col): if indexes: for column in indexes: if column.db_index_name in idx_names: continue - qs = ['CREATE INDEX {}'.format(column.db_index_name)] + qs = ['CREATE INDEX index_{}_{}'.format(raw_cf_name, column.db_field_name)] qs += ['ON {}'.format(cf_name)] qs += ['("{}")'.format(column.db_field_name)] qs = ' '.join(qs) From fa5547573d478ee57ac0e29ac9bc9c89dc2e596f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mariusz=20Kry=C5=84ski?= Date: Wed, 13 Mar 2013 23:23:17 +0100 Subject: [PATCH 0136/3726] enable indexes on models with multiple primary keys (supported since cassandra 1.2) --- cqlengine/models.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index b9801bf0bf..aec06b4f89 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -230,12 +230,6 @@ def _transform_column(col_name, col_obj): raise ModelException("{} defines the column {} more than once".format(name, v.db_field_name)) col_names.add(v.db_field_name) - #check for indexes on models with multiple primary keys - if len([1 for k,v in column_definitions if v.primary_key]) > 1: - if len([1 for k,v in column_definitions if v.index]) > 0: - raise ModelDefinitionException( - 'Indexes on models with multiple primary keys is not supported') - #create db_name -> model name map for loading db_map = {} for field_name, col in column_dict.items(): From 1481391fa6ee3c7f993a1057beaaad7926c5dba2 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Mariusz=20Kry=C5=84ski?= Date: Fri, 15 Mar 2013 13:55:05 +0100 Subject: [PATCH 0137/3726] connection.setup 'lazy' parameter --- cqlengine/connection.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 9be7922579..e0afffdb8c 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -23,7 +23,7 @@ class CQLConnectionError(CQLEngineException): pass _password = None _max_connections = 10 -def setup(hosts, username=None, password=None, max_connections=10, default_keyspace=None): +def setup(hosts, username=None, password=None, max_connections=10, default_keyspace=None, lazy=False): """ Records the hosts and connects to one of them @@ -55,9 +55,10 @@ def setup(hosts, username=None, password=None, max_connections=10, default_keysp raise CQLConnectionError("At least one host required") random.shuffle(_hosts) - - con = ConnectionPool.get() - ConnectionPool.put(con) + + if not lazy: + con = ConnectionPool.get() + ConnectionPool.put(con) class ConnectionPool(object): From f94283f0335126178952c2fc9c09bb66d15ead4d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 20 Mar 2013 19:24:33 -0700 Subject: [PATCH 0138/3726] fixing in operator per https://github.com/bdeggleston/cqlengine/issues/34 --- cqlengine/query.py | 21 +++++++++++++++++++++ cqlengine/tests/query/test_queryset.py | 4 ++++ 2 files changed, 25 insertions(+) diff --git a/cqlengine/query.py b/cqlengine/query.py index 2355edeadf..0e2fd4e3a1 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -110,6 +110,27 @@ class InOperator(EqualsOperator): symbol = 'IN' cql_symbol = 'IN' + class Quoter(object): + """ + contains a single value, which will quote itself for CQL insertion statements + """ + def __init__(self, value): + self.value = value + + def __str__(self): + from cql.query import cql_quote as cq + return '(' + ', '.join([cq(v) for v in self.value]) + ')' + + def get_dict(self): + if 
isinstance(self.value, BaseQueryFunction): + return {self.identifier: self.column.to_database(self.value.get_value())} + else: + try: + values = [v for v in self.value] + except TypeError: + raise QueryException("in operator arguments must be iterable, {} found".format(self.value)) + return {self.identifier: self.Quoter([self.column.to_database(v) for v in self.value])} + class GreaterThanOperator(QueryOperator): symbol = "GT" cql_symbol = '>' diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 99f45afacd..cc003079e0 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -436,7 +436,11 @@ def test_success_case(self): assert '4' in datas +class TestInOperator(BaseQuerySetUsage): + def test_success_case(self): + q = TestModel.filter(test_id__in=[0,1]) + assert q.count() == 8 From e10237e8558e2058eee56a28396feea4cb25d50e Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 20 Mar 2013 19:28:33 -0700 Subject: [PATCH 0139/3726] changing list column behavior to load lists, not tuples, from the database --- cqlengine/columns.py | 4 ++++ cqlengine/tests/columns/test_container_columns.py | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index fd5fc7c9ca..f63afff1de 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -433,6 +433,10 @@ def validate(self, value): raise ValidationError('{} is not a list object'.format(val)) return [self.value_col.validate(v) for v in val] + def to_python(self, value): + if value is None: return None + return list(value) + def to_database(self, value): if value is None: return None if isinstance(value, self.Quoter): return value diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 914c7479a8..b598da56bf 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ 
b/cqlengine/tests/columns/test_container_columns.py @@ -95,8 +95,8 @@ def test_io_success(self): m1 = TestListModel.create(int_list=[1,2], text_list=['kai', 'andreas']) m2 = TestListModel.get(partition=m1.partition) - assert isinstance(m2.int_list, tuple) - assert isinstance(m2.text_list, tuple) + assert isinstance(m2.int_list, list) + assert isinstance(m2.text_list, list) assert len(m2.int_list) == 2 assert len(m2.text_list) == 2 From cad39537efecb9847ddab2dda9bbe2add6fd0be6 Mon Sep 17 00:00:00 2001 From: Kai Lautaportti Date: Fri, 22 Mar 2013 09:52:18 +0200 Subject: [PATCH 0140/3726] Ignore empty batches instead of sending invalid CQL to Cassandra. --- cqlengine/query.py | 12 ++++++++---- cqlengine/tests/test_batch_query.py | 6 ++++++ 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 0e2fd4e3a1..3079986744 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -169,6 +169,10 @@ def add_query(self, query, params): self.queries.append((query, params)) def execute(self): + if len(self.queries) == 0: + # Empty batch is a no-op + return + opener = 'BEGIN ' + (self.batch_type + ' ' if self.batch_type else '') + ' BATCH' if self.timestamp: epoch = datetime(1970, 1, 1) @@ -257,7 +261,7 @@ def __deepcopy__(self, memo): def __len__(self): return self.count() - + def __del__(self): if self._con: self._con.close() @@ -347,7 +351,7 @@ def _fill_result_cache_to_idx(self, idx): value_dict = dict(zip(names, values)) self._result_idx += 1 self._result_cache[self._result_idx] = self._construct_instance(value_dict) - + #return the connection to the connection pool if we have all objects if self._result_cache and self._result_cache[-1] is not None: self._con.close() @@ -389,7 +393,7 @@ def __getitem__(self, s): else: self._fill_result_cache_to_idx(s) return self._result_cache[s] - + def _construct_instance(self, values): #translate column names to model names @@ -476,7 +480,7 @@ def get(self, **kwargs): '{} objects 
found'.format(len(self._result_cache))) else: return self[0] - + def order_by(self, colname): """ orders the result set. diff --git a/cqlengine/tests/test_batch_query.py b/cqlengine/tests/test_batch_query.py index 6ba608778c..7ee98b7084 100644 --- a/cqlengine/tests/test_batch_query.py +++ b/cqlengine/tests/test_batch_query.py @@ -103,3 +103,9 @@ def test_bulk_delete_success_case(self): for m in TestMultiKeyModel.all(): m.delete() + def test_empty_batch(self): + b = BatchQuery() + b.execute() + + with BatchQuery() as b: + pass From beb1f48760cb597f179fb7286b15def4cb4a79fe Mon Sep 17 00:00:00 2001 From: Kai Lautaportti Date: Fri, 22 Mar 2013 10:11:45 +0200 Subject: [PATCH 0141/3726] Allow multiple ORDER BY conditions to be set. Supports both .order_by('foo', 'bar') and chained .order_by() calls. --- cqlengine/query.py | 52 ++++++++++++++------------ cqlengine/tests/query/test_queryset.py | 38 +++++++++++++++---- 2 files changed, 58 insertions(+), 32 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 0e2fd4e3a1..7082bf9905 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -206,7 +206,7 @@ def __init__(self, model): self._where = [] #ordering arguments - self._order = None + self._order = [] self._allow_filtering = False @@ -257,7 +257,7 @@ def __deepcopy__(self, memo): def __len__(self): return self.count() - + def __del__(self): if self._con: self._con.close() @@ -313,7 +313,7 @@ def _select_query(self): qs += ['WHERE {}'.format(self._where_clause())] if self._order: - qs += ['ORDER BY {}'.format(self._order)] + qs += ['ORDER BY {}'.format(', '.join(self._order))] if self._limit: qs += ['LIMIT {}'.format(self._limit)] @@ -347,7 +347,7 @@ def _fill_result_cache_to_idx(self, idx): value_dict = dict(zip(names, values)) self._result_idx += 1 self._result_cache[self._result_idx] = self._construct_instance(value_dict) - + #return the connection to the connection pool if we have all objects if self._result_cache and self._result_cache[-1] is 
not None: self._con.close() @@ -389,7 +389,7 @@ def __getitem__(self, s): else: self._fill_result_cache_to_idx(s) return self._result_cache[s] - + def _construct_instance(self, values): #translate column names to model names @@ -476,38 +476,42 @@ def get(self, **kwargs): '{} objects found'.format(len(self._result_cache))) else: return self[0] - - def order_by(self, colname): + + def order_by(self, *colnames): """ orders the result set. - ordering can only select one column, and it must be the second column in a composite primary key + ordering can only use clustering columns. Default order is ascending, prepend a '-' to the column name for descending """ - if colname is None: + if len(colnames) == 0: clone = copy.deepcopy(self) - clone._order = None + clone._order = [] return clone - order_type = 'DESC' if colname.startswith('-') else 'ASC' - colname = colname.replace('-', '') + conditions = [] + for colname in colnames: + order_type = 'DESC' if colname.startswith('-') else 'ASC' + colname = colname.replace('-', '') - column = self.model._columns.get(colname) - if column is None: - raise QueryException("Can't resolve the column name: '{}'".format(colname)) + column = self.model._columns.get(colname) + if column is None: + raise QueryException("Can't resolve the column name: '{}'".format(colname)) - #validate the column selection - if not column.primary_key: - raise QueryException( - "Can't order on '{}', can only order on (clustered) primary keys".format(colname)) + #validate the column selection + if not column.primary_key: + raise QueryException( + "Can't order on '{}', can only order on (clustered) primary keys".format(colname)) - pks = [v for k,v in self.model._columns.items() if v.primary_key] - if column == pks[0]: - raise QueryException( - "Can't order by the first primary key, clustering (secondary) keys only") + pks = [v for k, v in self.model._columns.items() if v.primary_key] + if column == pks[0]: + raise QueryException( + "Can't order by the first 
primary key (partition key), clustering (secondary) keys only") + + conditions.append('"{}" {}'.format(column.db_field_name, order_type)) clone = copy.deepcopy(self) - clone._order = '"{}" {}'.format(column.db_field_name, order_type) + clone._order.extend(conditions) return clone def count(self): diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index cc003079e0..660fb8db7f 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -26,6 +26,12 @@ class IndexedTestModel(Model): expected_result = columns.Integer() test_result = columns.Integer(index=True) +class TestMultiClusteringModel(Model): + one = columns.Integer(primary_key=True) + two = columns.Integer(primary_key=True) + three = columns.Integer(primary_key=True) + + class TestQuerySetOperation(BaseCassEngTestCase): def test_query_filter_parsing(self): @@ -115,6 +121,7 @@ def setUpClass(cls): delete_table(IndexedTestModel) create_table(TestModel) create_table(IndexedTestModel) + create_table(TestMultiClusteringModel) TestModel.objects.create(test_id=0, attempt_id=0, description='try1', expected_result=5, test_result=30) TestModel.objects.create(test_id=0, attempt_id=1, description='try2', expected_result=10, test_result=30) @@ -151,6 +158,7 @@ def tearDownClass(cls): super(BaseQuerySetUsage, cls).tearDownClass() delete_table(TestModel) delete_table(IndexedTestModel) + delete_table(TestMultiClusteringModel) class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage): @@ -179,7 +187,7 @@ def test_iteration(self): assert val in compare_set compare_set.remove(val) assert len(compare_set) == 0 - + def test_multiple_iterations_work_properly(self): """ Tests that iterating over a query set more than once works """ q = TestModel.objects(test_id=0) @@ -277,6 +285,20 @@ def test_ordering_on_indexed_columns_fails(self): with self.assertRaises(query.QueryException): q = IndexedTestModel.objects(test_id=0).order_by('attempt_id') + def 
test_ordering_on_multiple_clustering_columns(self): + TestMultiClusteringModel.create(one=1, two=1, three=4) + TestMultiClusteringModel.create(one=1, two=1, three=2) + TestMultiClusteringModel.create(one=1, two=1, three=5) + TestMultiClusteringModel.create(one=1, two=1, three=1) + TestMultiClusteringModel.create(one=1, two=1, three=3) + + results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('-two', '-three') + assert [r.three for r in results] == [5, 4, 3, 2, 1] + + results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('two', 'three') + assert [r.three for r in results] == [1, 2, 3, 4, 5] + + class TestQuerySetSlicing(BaseQuerySetUsage): def test_out_of_range_index_raises_error(self): @@ -344,12 +366,12 @@ def test_delete(self): TestModel.objects.create(test_id=3, attempt_id=1, description='try10', expected_result=60, test_result=40) TestModel.objects.create(test_id=3, attempt_id=2, description='try11', expected_result=70, test_result=45) TestModel.objects.create(test_id=3, attempt_id=3, description='try12', expected_result=75, test_result=45) - + assert TestModel.objects.count() == 16 assert TestModel.objects(test_id=3).count() == 4 - + TestModel.objects(test_id=3).delete() - + assert TestModel.objects.count() == 12 assert TestModel.objects(test_id=3).count() == 0 @@ -364,7 +386,7 @@ def test_delete_without_any_where_args(self): TestModel.objects(attempt_id=0).delete() class TestQuerySetConnectionHandling(BaseQuerySetUsage): - + def test_conn_is_returned_after_filling_cache(self): """ Tests that the queryset returns it's connection after it's fetched all of it's results @@ -376,10 +398,10 @@ def test_conn_is_returned_after_filling_cache(self): val = t.attempt_id, t.expected_result assert val in compare_set compare_set.remove(val) - + assert q._con is None assert q._cur is None - + def test_conn_is_returned_after_queryset_is_garbage_collected(self): """ Tests that the connection is returned to the connection pool after the 
queryset is gc'd """ from cqlengine.connection import ConnectionPool @@ -387,7 +409,7 @@ def test_conn_is_returned_after_queryset_is_garbage_collected(self): q = TestModel.objects(test_id=0) v = q[0] assert ConnectionPool._queue.qsize() == 0 - + del q assert ConnectionPool._queue.qsize() == 1 From c4d4eb874e79250b855199c375c6f53dd4fc01ea Mon Sep 17 00:00:00 2001 From: Kai Lautaportti Date: Fri, 22 Mar 2013 10:20:03 +0200 Subject: [PATCH 0142/3726] Added a derived Date type. Internally it is stored as a timestamp but the value is exposed as a instance. --- cqlengine/columns.py | 32 ++++++++++++++-- cqlengine/tests/columns/test_validation.py | 43 +++++++++++++++++++--- 2 files changed, 67 insertions(+), 8 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index f63afff1de..5cbe6a0946 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -1,6 +1,7 @@ #column field types from copy import copy from datetime import datetime +from datetime import date import re from uuid import uuid1, uuid4 from cql.query import cql_quote @@ -216,6 +217,31 @@ def to_database(self, value): epoch = datetime(1970, 1, 1) return long((value - epoch).total_seconds() * 1000) + +class Date(Column): + db_type = 'timestamp' + + def __init__(self, **kwargs): + super(Date, self).__init__(**kwargs) + + def to_python(self, value): + if isinstance(value, datetime): + return value.date() + elif isinstance(value, date): + return value + + return date.fromtimestamp(value) + + def to_database(self, value): + value = super(Date, self).to_database(value) + if isinstance(value, datetime): + value = value.date() + if not isinstance(value, date): + raise ValidationError("'{}' is not a date object".format(repr(value))) + + return long((value - date(1970, 1, 1)).total_seconds() * 1000) + + class UUID(Column): """ Type 1 or 4 UUID @@ -235,14 +261,14 @@ def validate(self, value): if not self.re_uuid.match(val): raise ValidationError("{} is not a valid uuid".format(value)) return 
_UUID(val) - + class TimeUUID(UUID): """ UUID containing timestamp """ - + db_type = 'timeuuid' - + def __init__(self, **kwargs): kwargs.setdefault('default', lambda: uuid1()) super(TimeUUID, self).__init__(**kwargs) diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 4921b67256..44903a4c85 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -1,5 +1,6 @@ #tests the behavior of the column classes from datetime import datetime +from datetime import date from decimal import Decimal as D from cqlengine import ValidationError @@ -11,6 +12,7 @@ from cqlengine.columns import Text from cqlengine.columns import Integer from cqlengine.columns import DateTime +from cqlengine.columns import Date from cqlengine.columns import UUID from cqlengine.columns import Boolean from cqlengine.columns import Float @@ -40,6 +42,37 @@ def test_datetime_io(self): dt2 = self.DatetimeTest.objects(test_id=0).first() assert dt2.created_at.timetuple()[:6] == now.timetuple()[:6] + +class TestDate(BaseCassEngTestCase): + class DateTest(Model): + test_id = Integer(primary_key=True) + created_at = Date() + + @classmethod + def setUpClass(cls): + super(TestDate, cls).setUpClass() + create_table(cls.DateTest) + + @classmethod + def tearDownClass(cls): + super(TestDate, cls).tearDownClass() + delete_table(cls.DateTest) + + def test_date_io(self): + today = date.today() + self.DateTest.objects.create(test_id=0, created_at=today) + dt2 = self.DateTest.objects(test_id=0).first() + assert dt2.created_at.isoformat() == today.isoformat() + + def test_date_io_using_datetime(self): + now = datetime.utcnow() + self.DateTest.objects.create(test_id=0, created_at=now) + dt2 = self.DateTest.objects(test_id=0).first() + assert not isinstance(dt2.created_at, datetime) + assert isinstance(dt2.created_at, date) + assert dt2.created_at.isoformat() == now.date().isoformat() + + class 
TestDecimal(BaseCassEngTestCase): class DecimalTest(Model): test_id = Integer(primary_key=True) @@ -63,26 +96,26 @@ def test_datetime_io(self): dt = self.DecimalTest.objects.create(test_id=0, dec_val=5) dt2 = self.DecimalTest.objects(test_id=0).first() assert dt2.dec_val == D('5') - + class TestTimeUUID(BaseCassEngTestCase): class TimeUUIDTest(Model): test_id = Integer(primary_key=True) timeuuid = TimeUUID() - + @classmethod def setUpClass(cls): super(TestTimeUUID, cls).setUpClass() create_table(cls.TimeUUIDTest) - + @classmethod def tearDownClass(cls): super(TestTimeUUID, cls).tearDownClass() delete_table(cls.TimeUUIDTest) - + def test_timeuuid_io(self): t0 = self.TimeUUIDTest.create(test_id=0) t1 = self.TimeUUIDTest.get(test_id=0) - + assert t1.timeuuid.time == t1.timeuuid.time class TestInteger(BaseCassEngTestCase): From 1cffc497e394f1d62b6d6f09d1217b013ea91f61 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 23 Mar 2013 21:54:54 -0700 Subject: [PATCH 0143/3726] adding test around chaining order_by statements --- cqlengine/tests/base.py | 2 +- cqlengine/tests/query/test_queryset.py | 3 +++ requirements.txt | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index 0b450cdd8b..e9fa8a9b19 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -7,7 +7,7 @@ class BaseCassEngTestCase(TestCase): def setUpClass(cls): super(BaseCassEngTestCase, cls).setUpClass() if not connection._hosts: - connection.setup(['localhost'], default_keyspace='cqlengine_test') + connection.setup(['localhost:9170'], default_keyspace='cqlengine_test') def assertHasAttr(self, obj, attr): self.assertTrue(hasattr(obj, attr), diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 660fb8db7f..d5e6fe16db 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -298,6 +298,9 @@ def 
test_ordering_on_multiple_clustering_columns(self): results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('two', 'three') assert [r.three for r in results] == [1, 2, 3, 4, 5] + results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('two').order_by('three') + assert [r.three for r in results] == [1, 2, 3, 4, 5] + class TestQuerySetSlicing(BaseQuerySetUsage): diff --git a/requirements.txt b/requirements.txt index 2415255265..04a6bc4465 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -cql==1.2.0 +cql==1.4.0 ipython==0.13.1 ipdb==0.7 Sphinx==1.1.3 From 92e37c96c81211d57265661dc16b1bccaee40ed6 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 23 Mar 2013 22:01:46 -0700 Subject: [PATCH 0144/3726] making date deserialize with utc timezone --- cqlengine/columns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 5cbe6a0946..76830d2c27 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -230,7 +230,7 @@ def to_python(self, value): elif isinstance(value, date): return value - return date.fromtimestamp(value) + return datetime.utcfromtimestamp(value).date() def to_database(self, value): value = super(Date, self).to_database(value) From 5a6c276a887d38da1cae8cfb90188c60bf298a38 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 23 Mar 2013 22:04:00 -0700 Subject: [PATCH 0145/3726] modifying pk comparison to look at the db_field_name, so it's not dependent on the ids being identical --- cqlengine/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index a0f758f18f..bb7d036f64 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -604,7 +604,7 @@ def delete(self, columns=[]): """ #validate where clause partition_key = self.model._primary_keys.values()[0] - if not any([c.column == partition_key for c in self._where]): + if not any([c.column.db_field_name == 
partition_key.db_field_name for c in self._where]): raise QueryException("The partition key must be defined on delete queries") qs = ['DELETE FROM {}'.format(self.column_family_name)] qs += ['WHERE {}'.format(self._where_clause())] From 2be6f3409c0b3fedc0e57bd99072e73571a30852 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 22 Apr 2013 16:02:31 -0700 Subject: [PATCH 0146/3726] Fixed issue with trying to call .close() on a None --- cqlengine/connection.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index e0afffdb8c..2a5ef989eb 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -78,7 +78,7 @@ def clear(cls): cls._queue.get().close() except: pass - + @classmethod def get(cls): """ @@ -124,7 +124,7 @@ def _create_connection(cls): global _hosts global _username global _password - + if not _hosts: raise CQLConnectionError("At least one host required") @@ -133,7 +133,7 @@ def _create_connection(cls): new_conn = cql.connect(host.name, host.port, user=_username, password=_password) new_conn.set_cql_version('3.0.0') return new_conn - + class connection_manager(object): """ @@ -145,7 +145,7 @@ def __init__(self): self.keyspace = None self.con = ConnectionPool.get() self.cur = None - + def close(self): if self.cur: self.cur.close() ConnectionPool.put(self.con) @@ -175,7 +175,6 @@ def execute(self, query, params={}): except TTransportException: #TODO: check for other errors raised in the event of a connection / server problem #move to the next connection and set the connection pool - self.con = None _host_idx += 1 _host_idx %= len(_hosts) self.con.close() From 8ec6fc38a43eea201e7086c455fdccb54d5df7de Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 2 May 2013 22:21:56 -0700 Subject: [PATCH 0147/3726] version number bump --- cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- setup.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git 
a/cqlengine/__init__.py b/cqlengine/__init__.py index 184a4c8a38..ed5a820213 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -3,5 +3,5 @@ from cqlengine.models import Model from cqlengine.query import BatchQuery -__version__ = '0.1.2' +__version__ = '0.2' diff --git a/docs/conf.py b/docs/conf.py index 59c9a1239b..96c7db6ed6 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.1.2' +version = '0.2' # The full version, including alpha/beta/rc tags. -release = '0.1.2' +release = '0.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index 4190fb384f..791c75df20 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.1.2' +version = '0.2' long_desc = """ cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine From b8584770c0cf2a0b6c2713a4760b2a4b82ce4546 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mariusz=20Kry=C5=84ski?= Date: Sat, 4 May 2013 19:53:46 +0200 Subject: [PATCH 0148/3726] Token function --- cqlengine/functions.py | 11 +++++++++++ cqlengine/query.py | 11 +++++++---- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/cqlengine/functions.py b/cqlengine/functions.py index 15e93d4659..3f023e75fb 100644 --- a/cqlengine/functions.py +++ b/cqlengine/functions.py @@ -23,6 +23,9 @@ def to_cql(self, value_id): def get_value(self): raise NotImplementedError + def format_cql(self, field, operator, value_id): + return '"{}" {} {}'.format(field, operator, self.to_cql(value_id)) + class MinTimeUUID(BaseQueryFunction): _cql_string = 'MinTimeUUID(:{})' @@ -57,3 +60,11 @@ def get_value(self): epoch = datetime(1970, 1, 1) return long((self.value - epoch).total_seconds() * 1000) +class Token(BaseQueryFunction): + _cql_string = 'token(:{})' + + def format_cql(self, field, operator, 
value_id): + return 'token("{}") {} {}'.format(field, operator, self.to_cql(value_id)) + + def get_value(self): + return self.value diff --git a/cqlengine/query.py b/cqlengine/query.py index bb7d036f64..a2762d3af0 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -8,7 +8,7 @@ from cqlengine.connection import connection_manager from cqlengine.exceptions import CQLEngineException -from cqlengine.functions import BaseQueryFunction +from cqlengine.functions import BaseQueryFunction, Token #CQL 3 reference: #http://www.datastax.com/docs/1.1/references/cql/index @@ -44,7 +44,7 @@ def cql(self): :param valname: the dict key that this operator's compare value will be found in """ if isinstance(self.value, BaseQueryFunction): - return '"{}" {} {}'.format(self.column.db_field_name, self.cql_symbol, self.value.to_cql(self.identifier)) + return self.value.format_cql(self.column.db_field_name, self.cql_symbol, self.identifier) else: return '"{}" {} :{}'.format(self.column.db_field_name, self.cql_symbol, self.identifier) @@ -275,14 +275,17 @@ def _validate_where_syntax(self): #check that there's either a = or IN relationship with a primary key or indexed field equal_ops = [w for w in self._where if isinstance(w, EqualsOperator)] - if not any([w.column.primary_key or w.column.index for w in equal_ops]): + token_ops = [w for w in self._where if isinstance(w.value, Token)] + if not any([w.column.primary_key or w.column.index for w in equal_ops]) and not token_ops: raise QueryException('Where clauses require either a "=" or "IN" comparison with either a primary key or indexed field') if not self._allow_filtering: #if the query is not on an indexed field if not any([w.column.index for w in equal_ops]): - if not any([w.column._partition_key for w in equal_ops]): + if not any([w.column._partition_key for w in equal_ops]) and not token_ops: raise QueryException('Filtering on a clustering key without a partition key is not allowed unless allow_filtering() is called on the 
querset') + if any(not w.column._partition_key for w in token_ops): + raise QueryException('The token() function is only supported on the partition key') #TODO: abuse this to see if we can get cql to raise an exception From 3cb08dd23a5c28ec573bb0fd328b513a8b4c4e37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mariusz=20Kry=C5=84ski?= Date: Sat, 4 May 2013 20:05:22 +0200 Subject: [PATCH 0149/3726] do not call count() in QuerySet.__len__ --- cqlengine/query.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index a2762d3af0..fe8680b4e1 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -260,7 +260,8 @@ def __deepcopy__(self, memo): return clone def __len__(self): - return self.count() + self._execute_query() + return len(self._result_cache) def __del__(self): if self._con: From bc4f7ed78b8d9ee9e6474517d9ab262ca08099ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mariusz=20Kry=C5=84ski?= Date: Sat, 4 May 2013 22:30:27 +0200 Subject: [PATCH 0150/3726] QuerySet.values_list support --- cqlengine/query.py | 35 +++++++++++++++++++++++++++++++---- 1 file changed, 31 insertions(+), 4 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index fe8680b4e1..9ac1f948bb 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -222,6 +222,9 @@ def __init__(self, model): self._defer_fields = [] self._only_fields = [] + self._values_list = False + self._flat_values_list = False + #results cache self._con = None self._cur = None @@ -340,6 +343,9 @@ def _execute_query(self): self._con = connection_manager() self._cur = self._con.execute(self._select_query(), self._where_values()) self._result_cache = [None]*self._cur.rowcount + if self._cur.description: + names = [i[0] for i in self._cur.description] + self._construct_result = self._create_result_constructor(names) def _fill_result_cache_to_idx(self, idx): self._execute_query() @@ -350,14 +356,12 @@ def _fill_result_cache_to_idx(self, idx): if qty < 1: return 
else: - names = [i[0] for i in self._cur.description] for values in self._cur.fetchmany(qty): - value_dict = dict(zip(names, values)) self._result_idx += 1 - self._result_cache[self._result_idx] = self._construct_instance(value_dict) + self._result_cache[self._result_idx] = self._construct_result(values) #return the connection to the connection pool if we have all objects - if self._result_cache and self._result_cache[-1] is not None: + if self._result_cache and self._result_idx == (len(self._result_cache) - 1): self._con.close() self._con = None self._cur = None @@ -398,6 +402,17 @@ def __getitem__(self, s): self._fill_result_cache_to_idx(s) return self._result_cache[s] + def _create_result_constructor(self, names): + if not self._values_list: + return (lambda values: self._construct_instance(dict(zip(names, values)))) + + db_map = self.model._db_map + columns = [self.model._columns[db_map[name]] for name in names] + if self._flat_values_list: + return (lambda values: columns[0].to_python(values[0])) + else: + # result_cls = namedtuple("{}Tuple".format(self.model.__name__), names) + return (lambda values: map(lambda (c, v): c.to_python(v), zip(columns, values))) def _construct_instance(self, values): #translate column names to model names @@ -620,6 +635,18 @@ def delete(self, columns=[]): with connection_manager() as con: con.execute(qs, self._where_values()) + def values_list(self, *fields, **kwargs): + flat = kwargs.pop('flat', False) + if kwargs: + raise TypeError('Unexpected keyword arguments to values_list: %s' + % (kwargs.keys(),)) + if flat and len(fields) > 1: + raise TypeError("'flat' is not valid when values_list is called with more than one field.") + clone = self.only(fields) + clone._values_list = True + clone._flat_values_list = flat + return clone + class DMLQuery(object): """ A query object used for queries performing inserts, updates, or deletes From 49bd7415a82f1db81cc8696f804a1ca958787833 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Mariusz=20Kry=C5=84ski?= Date: Tue, 7 May 2013 00:52:06 +0200 Subject: [PATCH 0151/3726] memory leak fix --- cqlengine/query.py | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 9ac1f948bb..a4690df04a 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -403,30 +403,23 @@ def __getitem__(self, s): return self._result_cache[s] def _create_result_constructor(self, names): + model = self.model + db_map = model._db_map if not self._values_list: - return (lambda values: self._construct_instance(dict(zip(names, values)))) - - db_map = self.model._db_map - columns = [self.model._columns[db_map[name]] for name in names] + def _construct_instance(values): + field_dict = dict((db_map.get(k, k), v) for k, v in zip(names, values)) + instance = model(**field_dict) + instance._is_persisted = True + return instance + return _construct_instance + + columns = [model._columns[db_map[name]] for name in names] if self._flat_values_list: return (lambda values: columns[0].to_python(values[0])) else: # result_cls = namedtuple("{}Tuple".format(self.model.__name__), names) return (lambda values: map(lambda (c, v): c.to_python(v), zip(columns, values))) - def _construct_instance(self, values): - #translate column names to model names - field_dict = {} - db_map = self.model._db_map - for key, val in values.items(): - if key in db_map: - field_dict[db_map[key]] = val - else: - field_dict[key] = val - instance = self.model(**field_dict) - instance._is_persisted = True - return instance - def batch(self, batch_obj): """ Adds a batch query to the mix From edaf83f3aec166b5a28676404b0ad03c95d42547 Mon Sep 17 00:00:00 2001 From: Pandu Rao Date: Wed, 8 May 2013 17:06:09 -0700 Subject: [PATCH 0152/3726] Added guards to ensure that prev and val exist before calling their keys function --- cqlengine/columns.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/cqlengine/columns.py b/cqlengine/columns.py index 76830d2c27..8a2dd79ea6 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -627,8 +627,8 @@ def get_delete_statement(self, val, prev, ctx): if isinstance(val, self.Quoter): val = val.value if isinstance(prev, self.Quoter): prev = prev.value - old_keys = set(prev.keys()) - new_keys = set(val.keys()) + old_keys = set(prev.keys()) if prev else set() + new_keys = set(val.keys()) if val else set() del_keys = old_keys - new_keys del_statements = [] From 1bdd1a14b135a6c6c6d6d86118135de983225f79 Mon Sep 17 00:00:00 2001 From: "Gregory R. Doermann" Date: Thu, 9 May 2013 11:21:44 -0600 Subject: [PATCH 0153/3726] Timezone aware datetime fix. --- cqlengine/columns.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 76830d2c27..9c67a1641b 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -214,8 +214,12 @@ def to_database(self, value): value = super(DateTime, self).to_database(value) if not isinstance(value, datetime): raise ValidationError("'{}' is not a datetime object".format(value)) - epoch = datetime(1970, 1, 1) - return long((value - epoch).total_seconds() * 1000) + epoch = datetime(1970, 1, 1, tzinfo=value.tzinfo) + offset = 0 + if epoch.tzinfo: + offset_delta = epoch.tzinfo.utcoffset(epoch) + offset = offset_delta.days*24*3600 + offset_delta.seconds + return long(((value - epoch).total_seconds() - offset) * 1000) class Date(Column): From 29f5ef42acc0dbf73c6ea4a5ca312c3386656096 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 9 May 2013 10:40:05 -0700 Subject: [PATCH 0154/3726] fixing the base test connection --- cqlengine/tests/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index e9fa8a9b19..64e6a3c2a9 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -7,7 +7,7 @@ class BaseCassEngTestCase(TestCase): def 
setUpClass(cls): super(BaseCassEngTestCase, cls).setUpClass() if not connection._hosts: - connection.setup(['localhost:9170'], default_keyspace='cqlengine_test') + connection.setup(['localhost:9160'], default_keyspace='cqlengine_test') def assertHasAttr(self, obj, attr): self.assertTrue(hasattr(obj, attr), From c7f9a6d9378ded459e65103a9f028ed13071a172 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 9 May 2013 10:56:57 -0700 Subject: [PATCH 0155/3726] adding check to update statement to avoid failure on update from None --- cqlengine/columns.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 8a2dd79ea6..e028c538c9 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -600,6 +600,8 @@ def get_update_statement(self, val, prev, ctx): prev = self.to_database(prev) if isinstance(val, self.Quoter): val = val.value if isinstance(prev, self.Quoter): prev = prev.value + val = val or {} + prev = prev or {} #get the updated map update = {k:v for k,v in val.items() if v != prev.get(k)} From 47a0099708c381969125b18a3cd0416a08a55a1d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 9 May 2013 10:57:50 -0700 Subject: [PATCH 0156/3726] adding additional tests around map column updates --- .../tests/columns/test_container_columns.py | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index b598da56bf..3533aad3ef 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -210,6 +210,26 @@ def test_partial_updates(self): m2 = TestMapModel.get(partition=m1.partition) assert m2.text_map == final + def test_updates_from_none(self): + """ Tests that updates from None work as expected """ + m = TestMapModel.create(int_map=None) + expected = {1:uuid4()} + m.int_map = expected + m.save() + + m2 = 
TestMapModel.get(partition=m.partition) + assert m2.int_map == expected + + + def test_updates_to_none(self): + """ Tests that setting the field to None works as expected """ + m = TestMapModel.create(int_map={1:uuid4()}) + m.int_map = None + m.save() + + m2 = TestMapModel.get(partition=m.partition) + assert m2.int_map is None + # def test_partial_update_creation(self): # """ # Tests that proper update statements are created for a partial list update From 558f6981b4ae3b064637fc4b561a1675866eea03 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 9 May 2013 11:59:10 -0700 Subject: [PATCH 0157/3726] added test for date time column with tzinfo --- cqlengine/tests/columns/test_validation.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 44903a4c85..e323c4653d 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -1,6 +1,7 @@ #tests the behavior of the column classes -from datetime import datetime +from datetime import datetime, timedelta from datetime import date +from datetime import tzinfo from decimal import Decimal as D from cqlengine import ValidationError @@ -42,6 +43,18 @@ def test_datetime_io(self): dt2 = self.DatetimeTest.objects(test_id=0).first() assert dt2.created_at.timetuple()[:6] == now.timetuple()[:6] + def test_datetime_tzinfo_io(self): + class TZ(tzinfo): + def utcoffset(self, date_time): + return timedelta(hours=-1) + def dst(self, date_time): + return None + + now = datetime(1982, 1, 1, tzinfo=TZ()) + dt = self.DatetimeTest.objects.create(test_id=0, created_at=now) + dt2 = self.DatetimeTest.objects(test_id=0).first() + assert dt2.created_at.timetuple()[:6] == (now + timedelta(hours=1)).timetuple()[:6] + class TestDate(BaseCassEngTestCase): class DateTest(Model): From 8d23c7ba7adb05a8e6404bb676af67ad866398b6 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: 
Thu, 9 May 2013 12:03:20 -0700 Subject: [PATCH 0158/3726] updating pypi description --- setup.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/setup.py b/setup.py index 791c75df20..af82216686 100644 --- a/setup.py +++ b/setup.py @@ -16,8 +16,6 @@ [Users Mailing List](https://groups.google.com/forum/?fromgroups#!forum/cqlengine-users) [Dev Mailing List](https://groups.google.com/forum/?fromgroups#!forum/cqlengine-dev) - -**NOTE: cqlengine is in alpha and under development, some features may change. Make sure to check the changelog and test your app before upgrading** """ setup( From 7316fa20c02044e573ee8c769420bebbeba4a097 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 9 May 2013 12:04:24 -0700 Subject: [PATCH 0159/3726] updating changelog --- changelog | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/changelog b/changelog index 00dc02bced..3edb19f0fa 100644 --- a/changelog +++ b/changelog @@ -1,6 +1,10 @@ CHANGELOG -0.1.2 (in progress) +0.2.1 (in progress) +* adding support for datetimes with tzinfo (thanks @gdoermann) +* fixing bug in saving map updates (thanks @pandu-rao) + +0.2 * expanding internal save function to use update where appropriate * adding set, list, and map collection types * adding support for allow filtering flag From 2f8752353506cad8d806f362847517a8bb2475cb Mon Sep 17 00:00:00 2001 From: Silvio Tomatis Date: Mon, 13 May 2013 14:45:41 +0200 Subject: [PATCH 0160/3726] Add travis testing --- .travis.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000000..86b92c0ac8 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,14 @@ +language: python +python: + - "2.7" +services: + - cassandra +before_install: + - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /usr/local/cassandra/conf/cassandra-env.sh" + - sudo service cassandra start +install: + - "pip install -r requirements.txt 
--use-mirrors" + - "pip install pytest --use-mirrors" +script: + - while [ ! -f /var/run/cassandra.pid ] ; do sleep 1 ; done # wait until cassandra is ready + - "py.test cqlengine/tests/" From 4fee79b348a9d19b8cf09bf748762265866c6ce8 Mon Sep 17 00:00:00 2001 From: Silvio Tomatis Date: Mon, 13 May 2013 15:10:14 +0200 Subject: [PATCH 0161/3726] Make all tests pass when run alltogether with 'py.test cqlengine/tests' --- cqlengine/tests/query/test_queryset.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index d5e6fe16db..a77d750299 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -408,13 +408,15 @@ def test_conn_is_returned_after_filling_cache(self): def test_conn_is_returned_after_queryset_is_garbage_collected(self): """ Tests that the connection is returned to the connection pool after the queryset is gc'd """ from cqlengine.connection import ConnectionPool - assert ConnectionPool._queue.qsize() == 1 + # The queue size can be 1 if we just run this file's tests + # It will be 2 when we run 'em all + initial_size = ConnectionPool._queue.qsize() q = TestModel.objects(test_id=0) v = q[0] - assert ConnectionPool._queue.qsize() == 0 + assert ConnectionPool._queue.qsize() == initial_size - 1 del q - assert ConnectionPool._queue.qsize() == 1 + assert ConnectionPool._queue.qsize() == initial_size class TimeUUIDQueryModel(Model): partition = columns.UUID(primary_key=True) From 0022f21c975f1b47e8c8b9f25b73a35bc372a3bf Mon Sep 17 00:00:00 2001 From: Silvio Tomatis Date: Mon, 13 May 2013 15:48:44 +0200 Subject: [PATCH 0162/3726] Add test for boolean column in test_model_io --- cqlengine/tests/model/test_model_io.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index 7f708cf214..08e77ded36 100644 --- 
a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -1,4 +1,3 @@ -from unittest import skip from uuid import uuid4 import random from cqlengine.tests.base import BaseCassEngTestCase @@ -11,6 +10,7 @@ class TestModel(Model): count = columns.Integer() text = columns.Text(required=False) + a_bool = columns.Boolean(default=False) class TestModelIO(BaseCassEngTestCase): @@ -41,10 +41,12 @@ def test_model_updating_works_properly(self): tm = TestModel.objects.create(count=8, text='123456789') tm.count = 100 + tm.a_bool = True tm.save() tm2 = TestModel.objects(id=tm.pk).first() self.assertEquals(tm.count, tm2.count) + self.assertEquals(tm.a_bool, tm2.a_bool) def test_model_deleting_works_properly(self): """ From 52f87303a4681521d6b93772a7d0a478005e30b1 Mon Sep 17 00:00:00 2001 From: "Gregory R. Doermann" Date: Wed, 15 May 2013 13:46:27 -0600 Subject: [PATCH 0163/3726] Basic (working) counter column. You can add and subtract values. You must use CounterModel when using a CounterColumn. 
--- cqlengine/__init__.py | 2 +- cqlengine/columns.py | 8 +++++--- cqlengine/models.py | 12 ++++++++++++ cqlengine/query.py | 15 +++++++++++++-- 4 files changed, 31 insertions(+), 6 deletions(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 184a4c8a38..e46d0e6d49 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,6 +1,6 @@ from cqlengine.columns import * from cqlengine.functions import * -from cqlengine.models import Model +from cqlengine.models import Model, CounterModel from cqlengine.query import BatchQuery __version__ = '0.1.2' diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 9c67a1641b..ce7ce9936c 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -310,11 +310,13 @@ def to_database(self, value): class Decimal(Column): db_type = 'decimal' -class Counter(Column): - #TODO: counter field +class Counter(Integer): + """ Validates like an integer, goes into the database as a counter + """ + db_type = 'counter' + def __init__(self, **kwargs): super(Counter, self).__init__(**kwargs) - raise NotImplementedError class ContainerQuoter(object): """ diff --git a/cqlengine/models.py b/cqlengine/models.py index aec06b4f89..372f3253bf 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -261,3 +261,15 @@ class Model(BaseModel): __metaclass__ = ModelMetaClass +class CounterBaseModel(BaseModel): + def _can_update(self): + # INSERT is not allowed for counter column families + return True + + +class CounterModel(CounterBaseModel): + """ + the db name for the column family can be set as the attribute db_name, or + it will be genertaed from the class name + """ + __metaclass__ = ModelMetaClass diff --git a/cqlengine/query.py b/cqlengine/query.py index bb7d036f64..db8d17a318 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -4,7 +4,7 @@ from hashlib import md5 from time import time from uuid import uuid1 -from cqlengine import BaseContainerColumn, BaseValueManager, Map +from cqlengine import 
BaseContainerColumn, BaseValueManager, Map, Counter from cqlengine.connection import connection_manager from cqlengine.exceptions import CQLEngineException @@ -676,7 +676,18 @@ def save(self): if not col.is_primary_key: val = values.get(name) if val is None: continue - if isinstance(col, BaseContainerColumn): + if isinstance(col, Counter): + field_ids.pop(name) + value = field_values.pop(name) + if value == 0: + # Don't increment that column + continue + elif value < 0: + sign = '-' + else: + sign = '+' + set_statements += ['{0} = {0} {1} {2}'.format(col.db_field_name, sign, abs(value))] + elif isinstance(col, BaseContainerColumn): #remove value from query values, the column will handle it query_values.pop(field_ids.get(name), None) From 1b72535064d6954b70d41cb2de236457ed711786 Mon Sep 17 00:00:00 2001 From: "Gregory R. Doermann" Date: Fri, 17 May 2013 14:12:14 -0600 Subject: [PATCH 0164/3726] Custom TTL and timestamp information can be passed in on save. --- cqlengine/connection.py | 4 +++- cqlengine/functions.py | 15 +++++++++++++++ cqlengine/models.py | 19 +++++++++++++++---- cqlengine/query.py | 26 ++++++++++++++++++++++---- 4 files changed, 55 insertions(+), 9 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 2a5ef989eb..c56d08a928 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -156,13 +156,15 @@ def __enter__(self): def __exit__(self, type, value, traceback): self.close() - def execute(self, query, params={}): + def execute(self, query, params=None): """ Gets a connection from the pool and executes the given query, returns the cursor if there's a connection problem, this will silently create a new connection pool from the available hosts, and remove the problematic host from the host list """ + if params is None: + params = {} global _host_idx for i in range(len(_hosts)): diff --git a/cqlengine/functions.py b/cqlengine/functions.py index 15e93d4659..b2886c9ab4 100644 --- a/cqlengine/functions.py +++ 
b/cqlengine/functions.py @@ -57,3 +57,18 @@ def get_value(self): epoch = datetime(1970, 1, 1) return long((self.value - epoch).total_seconds() * 1000) + +class NotSet(object): + """ + Different from None, so you know when it just wasn't set compared to passing in None + """ + pass + + +def format_timestamp(timestamp): + if isinstance(timestamp, datetime): + epoch = datetime(1970, 1, 1) + ts = long((timestamp - epoch).total_seconds() * 1000) + else : + ts = long(timestamp) + return ts diff --git a/cqlengine/models.py b/cqlengine/models.py index 372f3253bf..33ccb56b92 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -3,9 +3,10 @@ from cqlengine import columns from cqlengine.exceptions import ModelException -from cqlengine.functions import BaseQueryFunction +from cqlengine.functions import BaseQueryFunction, NotSet from cqlengine.query import QuerySet, QueryException, DMLQuery + class ModelDefinitionException(ModelException): pass DEFAULT_KEYSPACE = 'cqlengine' @@ -38,12 +39,20 @@ class MultipleObjectsReturned(QueryException): pass #however, you can also define them manually here table_name = None + #DEFAULT_TTL must be an integer seconds for the default time to live on any insert on the table + #this can be overridden on any given query, but you can set a default on the model + DEFAULT_TTL = None + #the keyspace for this model keyspace = None read_repair_chance = 0.1 - def __init__(self, **values): + def __init__(self, ttl=NotSet, **values): self._values = {} + if ttl == NotSet: + self.ttl = self.DEFAULT_TTL + else: + self.ttl = ttl for name, column in self._columns.items(): value = values.get(name, None) if value is not None: value = column.to_python(value) @@ -136,10 +145,12 @@ def filter(cls, **kwargs): def get(cls, **kwargs): return cls.objects.get(**kwargs) - def save(self): + def save(self, ttl=NotSet, timestamp=None): + if ttl == NotSet: + ttl = self.ttl is_new = self.pk is None self.validate() - DMLQuery(self.__class__, self, 
batch=self._batch).save() + DMLQuery(self.__class__, self, batch=self._batch).save(ttl, timestamp) #reset the value managers for v in self._values.values(): diff --git a/cqlengine/query.py b/cqlengine/query.py index db8d17a318..eca930a07e 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -8,7 +8,7 @@ from cqlengine.connection import connection_manager from cqlengine.exceptions import CQLEngineException -from cqlengine.functions import BaseQueryFunction +from cqlengine.functions import BaseQueryFunction, format_timestamp #CQL 3 reference: #http://www.datastax.com/docs/1.1/references/cql/index @@ -175,8 +175,7 @@ def execute(self): opener = 'BEGIN ' + (self.batch_type + ' ' if self.batch_type else '') + ' BATCH' if self.timestamp: - epoch = datetime(1970, 1, 1) - ts = long((self.timestamp - epoch).total_seconds() * 1000) + ts = format_timestamp(self.timestamp) opener += ' USING TIMESTAMP {}'.format(ts) query_list = [opener] @@ -638,7 +637,7 @@ def batch(self, batch_obj): self.batch = batch_obj return self - def save(self): + def save(self, ttl=None, timestamp=None): """ Creates / updates a row. This is a blind insert call. 
@@ -666,8 +665,25 @@ def save(self): query_values = {field_ids[n]:field_values[n] for n in field_names} qs = [] + + using = [] + if ttl is not None: + ttl = int(ttl) + using.append('TTL {} '.format(ttl)) + if timestamp: + ts = format_timestamp(timestamp) + using.append('TIMESTAMP {} '.format(ts)) + + usings = '' + if using: + using = 'AND '.join(using).strip() + usings = ' USING {}'.format(using) + + if self.instance._can_update(): qs += ["UPDATE {}".format(self.column_family_name)] + if usings: + qs += [usings] qs += ["SET"] set_statements = [] @@ -714,6 +730,8 @@ def save(self): qs += ["({})".format(', '.join(['"{}"'.format(f) for f in field_names]))] qs += ['VALUES'] qs += ["({})".format(', '.join([':'+field_ids[f] for f in field_names]))] + if usings: + qs += [usings] qs = ' '.join(qs) From ff67b994835bbd3db97b74602cd9b7efafcd161b Mon Sep 17 00:00:00 2001 From: Pandu Rao Date: Tue, 21 May 2013 17:59:04 -0700 Subject: [PATCH 0165/3726] Fixed #46 delete property only on can_delete --- cqlengine/models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index aec06b4f89..1b1181ee83 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -192,9 +192,9 @@ def _transform_column(col_name, col_obj): _set = lambda self, val: self._values[col_name].setval(val) _del = lambda self: self._values[col_name].delval() if col_obj.can_delete: - attrs[col_name] = property(_get, _set) - else: attrs[col_name] = property(_get, _set, _del) + else: + attrs[col_name] = property(_get, _set) column_definitions = [(k,v) for k,v in attrs.items() if isinstance(v, columns.Column)] column_definitions = sorted(column_definitions, lambda x,y: cmp(x[1].position, y[1].position)) From bdc0c0399ae4315dba3e4e3e0dfbd4fa267a4a0b Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 22 May 2013 11:46:01 -0700 Subject: [PATCH 0166/3726] changing ORM (object row mapper) to Object Mapper, to avoid any confusion regarding support for 
object relationships --- README.md | 2 +- docs/index.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3e5b06bfa6..7b244ffeb6 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ cqlengine =============== -cqlengine is a Cassandra CQL 3 ORM for Python with an interface similar to the Django orm and mongoengine +cqlengine is a Cassandra CQL 3 Object Mapper for Python with an interface similar to the Django orm and mongoengine [Documentation](https://cqlengine.readthedocs.org/en/latest/) diff --git a/docs/index.rst b/docs/index.rst index dbc64e54f9..6cea2f2c33 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -5,7 +5,7 @@ cqlengine documentation ======================= -cqlengine is a Cassandra CQL 3 ORM for Python with an interface similar to the Django orm and mongoengine +cqlengine is a Cassandra CQL 3 Object Mapper for Python with an interface similar to the Django orm and mongoengine :ref:`getting-started` From 7e56e25d2186c00f75f735d0b74f9aa17de3d483 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mariusz=20Kry=C5=84ski?= Date: Sat, 25 May 2013 16:00:49 +0200 Subject: [PATCH 0167/3726] changed .all() behaviour - it clones QuerySet as in Django ORM --- cqlengine/query.py | 24 +++++++++++++++++++++--- cqlengine/tests/query/test_queryset.py | 6 +++--- 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index a4690df04a..809dedd85e 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -102,6 +102,19 @@ def _recurse(klass): except KeyError: raise QueryOperatorException("{} doesn't map to a QueryOperator".format(symbol)) + # equality operator, used by tests + + def __eq__(self, op): + return self.__class__ is op.__class__ and \ + self.column.db_field_name == op.column.db_field_name and \ + self.value == op.value + + def __ne__(self, op): + return not (self == op) + + def __hash__(self): + return hash(self.column.db_field_name) ^ hash(self.value) + class 
EqualsOperator(QueryOperator): symbol = 'EQ' cql_symbol = '=' @@ -439,9 +452,7 @@ def first(self): return None def all(self): - clone = copy.deepcopy(self) - clone._where = [] - return clone + return copy.deepcopy(self) def _parse_filter_arg(self, arg): """ @@ -640,6 +651,13 @@ def values_list(self, *fields, **kwargs): clone._flat_values_list = flat return clone + + def __eq__(self, q): + return set(self._where) == set(q._where) + + def __ne__(self, q): + return not (self != q) + class DMLQuery(object): """ A query object used for queries performing inserts, updates, or deletes diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index a77d750299..3c9c43cf9d 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -89,9 +89,9 @@ def test_queryset_is_immutable(self): query2 = query1.filter(expected_result__gte=1) assert len(query2._where) == 2 - def test_the_all_method_clears_where_filter(self): + def test_the_all_method_duplicates_queryset(self): """ - Tests that calling all on a queryset with previously defined filters returns a queryset with no filters + Tests that calling all on a queryset with previously defined filters duplicates queryset """ query1 = TestModel.objects(test_id=5) assert len(query1._where) == 1 @@ -100,7 +100,7 @@ def test_the_all_method_clears_where_filter(self): assert len(query2._where) == 2 query3 = query2.all() - assert len(query3._where) == 0 + assert query3 == query2 def test_defining_only_and_defer_fails(self): """ From ec10c687fb5e04dda360971626a31e20926109a4 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 26 May 2013 08:10:10 -0700 Subject: [PATCH 0168/3726] removing alpha from pypi info --- setup.py | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.py b/setup.py index af82216686..860b6739e3 100644 --- a/setup.py +++ b/setup.py @@ -25,7 +25,6 @@ dependency_links = 
['https://github.com/bdeggleston/cqlengine/archive/{0}.tar.gz#egg=cqlengine-{0}'.format(version)], long_description=long_desc, classifiers = [ - "Development Status :: 3 - Alpha", "Environment :: Web Environment", "Environment :: Plugins", "License :: OSI Approved :: BSD License", From c151fbd8b1b55f7ffc0f65b09249e4eaa00f5fbe Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 26 May 2013 08:10:52 -0700 Subject: [PATCH 0169/3726] updating pypi description --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 860b6739e3..0f98baecbe 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ version = '0.2' long_desc = """ -cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine +cqlengine is a Cassandra CQL Object Mapper for Python in the style of the Django orm and mongoengine [Documentation](https://cqlengine.readthedocs.org/en/latest/) From d1e69bf01df7abdb1b1f2429138e51998d213f5b Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 26 May 2013 08:16:21 -0700 Subject: [PATCH 0170/3726] adding test around deleting of model attributes --- cqlengine/tests/model/test_class_construction.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index 77b1d73775..36aa11e90f 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -117,7 +117,18 @@ def test_meta_data_is_not_inherited(self): """ Test that metadata defined in one class, is not inherited by subclasses """ - + + def test_del_attribute_is_assigned_properly(self): + """ Tests that columns that can be deleted have the del attribute """ + class DelModel(Model): + key = columns.Integer(primary_key=True) + data = columns.Integer(required=False) + + model = DelModel(key=4, data=5) + del model.data + with self.assertRaises(AttributeError): + 
del model.key + class TestManualTableNaming(BaseCassEngTestCase): class RenamedTest(cqlengine.Model): From 786a21ad0d898dc225cd38dd0a23fb6503ef957c Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 22 May 2013 11:46:01 -0700 Subject: [PATCH 0171/3726] changing ORM (object row mapper) to Object Mapper, to avoid any confusion regarding support for object relationships --- README.md | 2 +- docs/index.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3e5b06bfa6..7b244ffeb6 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ cqlengine =============== -cqlengine is a Cassandra CQL 3 ORM for Python with an interface similar to the Django orm and mongoengine +cqlengine is a Cassandra CQL 3 Object Mapper for Python with an interface similar to the Django orm and mongoengine [Documentation](https://cqlengine.readthedocs.org/en/latest/) diff --git a/docs/index.rst b/docs/index.rst index dbc64e54f9..6cea2f2c33 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -5,7 +5,7 @@ cqlengine documentation ======================= -cqlengine is a Cassandra CQL 3 ORM for Python with an interface similar to the Django orm and mongoengine +cqlengine is a Cassandra CQL 3 Object Mapper for Python with an interface similar to the Django orm and mongoengine :ref:`getting-started` From 4826f384823bc85d51a601b7b72a5624c2096249 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 26 May 2013 08:10:10 -0700 Subject: [PATCH 0172/3726] removing alpha from pypi info --- setup.py | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.py b/setup.py index af82216686..860b6739e3 100644 --- a/setup.py +++ b/setup.py @@ -25,7 +25,6 @@ dependency_links = ['https://github.com/bdeggleston/cqlengine/archive/{0}.tar.gz#egg=cqlengine-{0}'.format(version)], long_description=long_desc, classifiers = [ - "Development Status :: 3 - Alpha", "Environment :: Web Environment", "Environment :: Plugins", "License :: OSI Approved :: BSD License", From 
d6b984b95223ce538274b3e8d420ef2c441564b7 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 26 May 2013 08:10:52 -0700 Subject: [PATCH 0173/3726] updating pypi description --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 860b6739e3..0f98baecbe 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ version = '0.2' long_desc = """ -cqlengine is a Cassandra CQL ORM for Python in the style of the Django orm and mongoengine +cqlengine is a Cassandra CQL Object Mapper for Python in the style of the Django orm and mongoengine [Documentation](https://cqlengine.readthedocs.org/en/latest/) From bb3ea850f250f01cd68db2f77644fcd6c7b45fdc Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 26 May 2013 08:16:21 -0700 Subject: [PATCH 0174/3726] adding test around deleting of model attributes --- cqlengine/tests/model/test_class_construction.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index 77b1d73775..36aa11e90f 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -117,7 +117,18 @@ def test_meta_data_is_not_inherited(self): """ Test that metadata defined in one class, is not inherited by subclasses """ - + + def test_del_attribute_is_assigned_properly(self): + """ Tests that columns that can be deleted have the del attribute """ + class DelModel(Model): + key = columns.Integer(primary_key=True) + data = columns.Integer(required=False) + + model = DelModel(key=4, data=5) + del model.data + with self.assertRaises(AttributeError): + del model.key + class TestManualTableNaming(BaseCassEngTestCase): class RenamedTest(cqlengine.Model): From 929d1736eac5e1f03bab6184db7360a8d3b2a339 Mon Sep 17 00:00:00 2001 From: Pandu Rao Date: Sun, 26 May 2013 10:18:31 -0700 Subject: [PATCH 0175/3726] Fixed error in sample code in 
documentation --- docs/index.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 6cea2f2c33..f93a8505c4 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -64,7 +64,7 @@ Getting Started >>> q.count() 4 >>> for instance in q: - >>> print q.description + >>> print instance.description example5 example6 example7 @@ -78,7 +78,7 @@ Getting Started >>> q2.count() 1 >>> for instance in q2: - >>> print q.description + >>> print instance.description example5 From d9b0eb9bb5dacc00a1efa3c8fcbddb82fe66c147 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 26 May 2013 12:20:17 -0600 Subject: [PATCH 0176/3726] duplicate pandu-rao's doc fixes in the readme --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 7b244ffeb6..b54711f116 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,7 @@ class ExampleModel(Model): >>> q.count() 4 >>> for instance in q: ->>> print q.description +>>> print instance.description example5 example6 example7 @@ -71,6 +71,6 @@ example8 >>> q2.count() 1 >>> for instance in q2: ->>> print q.description +>>> print instance.description example5 ``` From cdcccbb99a9ead5a1aea67585d62ec42291ab964 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 26 May 2013 12:20:17 -0600 Subject: [PATCH 0177/3726] duplicate pandu-rao's doc fixes in the readme --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 7b244ffeb6..b54711f116 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,7 @@ class ExampleModel(Model): >>> q.count() 4 >>> for instance in q: ->>> print q.description +>>> print instance.description example5 example6 example7 @@ -71,6 +71,6 @@ example8 >>> q2.count() 1 >>> for instance in q2: ->>> print q.description +>>> print instance.description example5 ``` From 7c43b39237f7a1e0ee28da347705cc45eb89d83f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mariusz=20Kry=C5=84ski?= 
Date: Mon, 27 May 2013 00:34:53 +0200 Subject: [PATCH 0178/3726] clustering order, composite partition keys, Token func, docs & tests --- cqlengine/columns.py | 41 +++++++-- cqlengine/functions.py | 56 ++++++++----- cqlengine/management.py | 17 +++- cqlengine/models.py | 45 ++++++---- cqlengine/query.py | 84 ++++++++++--------- .../tests/model/test_class_construction.py | 24 +++++- .../tests/model/test_clustering_order.py | 35 ++++++++ cqlengine/tests/query/test_queryoperators.py | 20 ++++- cqlengine/tests/query/test_queryset.py | 11 ++- docs/topics/columns.rst | 11 ++- docs/topics/models.rst | 5 +- docs/topics/queryset.rst | 27 ++++++ 12 files changed, 284 insertions(+), 92 deletions(-) create mode 100644 cqlengine/tests/model/test_clustering_order.py diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 3c9bad45ba..ebf8cbc2ee 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -60,7 +60,7 @@ class Column(object): instance_counter = 0 - def __init__(self, primary_key=False, index=False, db_field=None, default=None, required=True): + def __init__(self, primary_key=False, partition_key=False, index=False, db_field=None, default=None, required=True, clustering_order=None): """ :param primary_key: bool flag, indicates this column is a primary key. The first primary key defined on a model is the partition key, all others are cluster keys @@ -69,15 +69,13 @@ def __init__(self, primary_key=False, index=False, db_field=None, default=None, :param default: the default value, can be a value or a callable (no args) :param required: boolean, is the field required? 
""" - self.primary_key = primary_key + self.partition_key = partition_key + self.primary_key = partition_key or primary_key self.index = index self.db_field = db_field self.default = default self.required = required - - #only the model meta class should touch this - self._partition_key = False - + self.clustering_order = clustering_order #the column name in the model definition self.column_name = None @@ -137,7 +135,7 @@ def get_column_def(self): """ Returns a column definition for CQL table definition """ - return '"{}" {}'.format(self.db_field_name, self.db_type) + return '{} {}'.format(self.cql, self.db_type) def set_column_name(self, name): """ @@ -156,6 +154,13 @@ def db_index_name(self): """ Returns the name of the cql index """ return 'index_{}'.format(self.db_field_name) + @property + def cql(self): + return self.get_cql() + + def get_cql(self): + return '"{}"'.format(self.db_field_name) + class Bytes(Column): db_type = 'blob' @@ -645,4 +650,26 @@ def get_delete_statement(self, val, prev, ctx): return del_statements +class _PartitionKeys(Column): + class value_manager(BaseValueManager): + pass + + def __init__(self, model): + self.model = model + +class _PartitionKeysToken(Column): + """ + virtual column representing token of partition columns. 
+ Used by filter(pk__token=Token(...)) filters + """ + + def __init__(self, model): + self.partition_columns = model._partition_keys.values() + super(_PartitionKeysToken, self).__init__(partition_key=True) + + def to_database(self, value): + raise NotImplementedError + + def get_cql(self): + return "token({})".format(", ".join(c.cql for c in self.partition_columns)) diff --git a/cqlengine/functions.py b/cqlengine/functions.py index 3f023e75fb..947841808a 100644 --- a/cqlengine/functions.py +++ b/cqlengine/functions.py @@ -1,30 +1,39 @@ from datetime import datetime +from uuid import uuid1 from cqlengine.exceptions import ValidationError -class BaseQueryFunction(object): +class QueryValue(object): """ - Base class for filtering functions. Subclasses of these classes can - be passed into .filter() and will be translated into CQL functions in - the resulting query + Base class for query filter values. Subclasses of these classes can + be passed into .filter() keyword args """ - _cql_string = None + _cql_string = ':{}' - def __init__(self, value): + def __init__(self, value, identifier=None): self.value = value + self.identifier = uuid1().hex if identifier is None else identifier - def to_cql(self, value_id): - """ - Returns a function for cql with the value id as it's argument - """ - return self._cql_string.format(value_id) + def get_cql(self): + return self._cql_string.format(self.identifier) def get_value(self): - raise NotImplementedError + return self.value + + def get_dict(self, column): + return {self.identifier: column.to_database(self.get_value())} + + @property + def cql(self): + return self.get_cql() - def format_cql(self, field, operator, value_id): - return '"{}" {} {}'.format(field, operator, self.to_cql(value_id)) +class BaseQueryFunction(QueryValue): + """ + Base class for filtering functions. 
Subclasses of these classes can + be passed into .filter() and will be translated into CQL functions in + the resulting query + """ class MinTimeUUID(BaseQueryFunction): @@ -61,10 +70,19 @@ def get_value(self): return long((self.value - epoch).total_seconds() * 1000) class Token(BaseQueryFunction): - _cql_string = 'token(:{})' - def format_cql(self, field, operator, value_id): - return 'token("{}") {} {}'.format(field, operator, self.to_cql(value_id)) + def __init__(self, *values): + if len(values) == 1 and isinstance(values[0], (list, tuple)): + values = values[0] + super(Token, self).__init__(values, [uuid1().hex for i in values]) + + def get_dict(self, column): + items = zip(self.identifier, self.value, column.partition_columns) + return dict( + (id, col.to_database(val)) for id, val, col in items + ) + + def get_cql(self): + token_args = ', '.join(':{}'.format(id) for id in self.identifier) + return "token({})".format(token_args) - def get_value(self): - return self.value diff --git a/cqlengine/management.py b/cqlengine/management.py index 67c34f6841..f5f6a64923 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -61,20 +61,29 @@ def create_table(model, create_missing_keyspace=True): #add column types pkeys = [] + ckeys = [] qtypes = [] def add_column(col): s = col.get_column_def() - if col.primary_key: pkeys.append('"{}"'.format(col.db_field_name)) + if col.primary_key: + keys = (pkeys if col.partition_key else ckeys) + keys.append('"{}"'.format(col.db_field_name)) qtypes.append(s) for name, col in model._columns.items(): add_column(col) - qtypes.append('PRIMARY KEY ({})'.format(', '.join(pkeys))) + qtypes.append('PRIMARY KEY (({}){})'.format(', '.join(pkeys), ckeys and ', ' + ', '.join(ckeys) or '')) qs += ['({})'.format(', '.join(qtypes))] - + + with_qs = ['read_repair_chance = {}'.format(model.read_repair_chance)] + + _order = ["%s %s" % (c.db_field_name, c.clustering_order or 'ASC') for c in model._clustering_keys.values()] + if _order: + 
with_qs.append("clustering order by ({})".format(', '.join(_order))) + # add read_repair_chance - qs += ['WITH read_repair_chance = {}'.format(model.read_repair_chance)] + qs += ['WITH {}'.format(' AND '.join(with_qs))] qs = ' '.join(qs) try: diff --git a/cqlengine/models.py b/cqlengine/models.py index aec06b4f89..f06042d72e 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -102,10 +102,10 @@ def column_family_name(cls, include_keyspace=True): if not include_keyspace: return cf_name return '{}.{}'.format(cls._get_keyspace(), cf_name) - @property - def pk(self): - """ Returns the object's primary key """ - return getattr(self, self._pk_name) + #@property + #def pk(self): + # """ Returns the object's primary key """ + # return getattr(self, self._pk_name) def validate(self): """ Cleans and validates the field values """ @@ -174,7 +174,6 @@ def __new__(cls, name, bases, attrs): column_dict = OrderedDict() primary_keys = OrderedDict() pk_name = None - primary_key = None #get inherited properties inherited_columns = OrderedDict() @@ -210,24 +209,40 @@ def _transform_column(col_name, col_obj): k,v = 'id', columns.UUID(primary_key=True) column_definitions = [(k,v)] + column_definitions + has_partition_keys = any(v.partition_key for (k, v) in column_definitions) + #TODO: check that the defined columns don't conflict with any of the Model API's existing attributes/methods #transform column definitions for k,v in column_definitions: - if pk_name is None and v.primary_key: - pk_name = k - primary_key = v - v._partition_key = True + if not has_partition_keys and v.primary_key: + v.partition_key = True + has_partition_keys = True _transform_column(k,v) - - #setup primary key shortcut - if pk_name != 'pk': + + partition_keys = OrderedDict(k for k in primary_keys.items() if k[1].partition_key) + clustering_keys = OrderedDict(k for k in primary_keys.items() if not k[1].partition_key) + + #setup partition key shortcut + assert partition_keys + if len(partition_keys) == 
1: + pk_name = partition_keys.keys()[0] attrs['pk'] = attrs[pk_name] + else: + # composite partition key case + _get = lambda self: tuple(self._values[c].getval() for c in partition_keys.keys()) + _set = lambda self, val: tuple(self._values[c].setval(v) for (c, v) in zip(partition_keys.keys(), val)) + attrs['pk'] = property(_get, _set) - #check for duplicate column names + # some validation col_names = set() for v in column_dict.values(): + # check for duplicate column names if v.db_field_name in col_names: raise ModelException("{} defines the column {} more than once".format(name, v.db_field_name)) + if v.clustering_order and not (v.primary_key and not v.partition_key): + raise ModelException("clustering_order may be specified only for clustering primary keys") + if v.clustering_order and v.clustering_order.lower() not in ('asc', 'desc'): + raise ModelException("invalid clustering order {} for column {}".format(repr(v.clustering_order), v.db_field_name)) col_names.add(v.db_field_name) #create db_name -> model name map for loading @@ -244,9 +259,11 @@ def _transform_column(col_name, col_obj): attrs['_defined_columns'] = defined_columns attrs['_db_map'] = db_map attrs['_pk_name'] = pk_name - attrs['_primary_key'] = primary_key attrs['_dynamic_columns'] = {} + attrs['_partition_keys'] = partition_keys + attrs['_clustering_keys'] = clustering_keys + #create the class and add a QuerySet to it klass = super(ModelMetaClass, cls).__new__(cls, name, bases, attrs) klass.objects = QuerySet(klass) diff --git a/cqlengine/query.py b/cqlengine/query.py index a4690df04a..19974eebbf 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -4,11 +4,11 @@ from hashlib import md5 from time import time from uuid import uuid1 -from cqlengine import BaseContainerColumn, BaseValueManager, Map +from cqlengine import BaseContainerColumn, BaseValueManager, Map, columns from cqlengine.connection import connection_manager from cqlengine.exceptions import CQLEngineException -from 
cqlengine.functions import BaseQueryFunction, Token +from cqlengine.functions import QueryValue, Token #CQL 3 reference: #http://www.datastax.com/docs/1.1/references/cql/index @@ -24,14 +24,16 @@ class QueryOperator(object): # The comparator symbol this operator uses in cql cql_symbol = None + QUERY_VALUE_WRAPPER = QueryValue + def __init__(self, column, value): self.column = column self.value = value - #the identifier is a unique key that will be used in string - #replacement on query strings, it's created from a hash - #of this object's id and the time - self.identifier = uuid1().hex + if isinstance(value, QueryValue): + self.query_value = value + else: + self.query_value = self.QUERY_VALUE_WRAPPER(value) #perform validation on this operator self.validate_operator() @@ -41,12 +43,8 @@ def __init__(self, column, value): def cql(self): """ Returns this operator's portion of the WHERE clause - :param valname: the dict key that this operator's compare value will be found in """ - if isinstance(self.value, BaseQueryFunction): - return self.value.format_cql(self.column.db_field_name, self.cql_symbol, self.identifier) - else: - return '"{}" {} :{}'.format(self.column.db_field_name, self.cql_symbol, self.identifier) + return '{} {} {}'.format(self.column.cql, self.cql_symbol, self.query_value.cql) def validate_operator(self): """ @@ -81,10 +79,7 @@ def get_dict(self): this should return the dict: {'colval':} SELECT * FROM column_family WHERE colname=:colval """ - if isinstance(self.value, BaseQueryFunction): - return {self.identifier: self.column.to_database(self.value.get_value())} - else: - return {self.identifier: self.column.to_database(self.value)} + return self.query_value.get_dict(self.column) @classmethod def get_operator(cls, symbol): @@ -102,34 +97,34 @@ def _recurse(klass): except KeyError: raise QueryOperatorException("{} doesn't map to a QueryOperator".format(symbol)) + def __eq__(self, op): + return self.__class__ is op.__class__ and self.column == 
op.column and self.value == op.value + + def __ne__(self, op): + return not (self == op) + class EqualsOperator(QueryOperator): symbol = 'EQ' cql_symbol = '=' +class IterableQueryValue(QueryValue): + def __init__(self, value): + try: + super(IterableQueryValue, self).__init__(value, [uuid1().hex for i in value]) + except TypeError: + raise QueryException("in operator arguments must be iterable, {} found".format(value)) + + def get_dict(self, column): + return dict((i, column.to_database(v)) for (i, v) in zip(self.identifier, self.value)) + + def get_cql(self): + return '({})'.format(', '.join(':{}'.format(i) for i in self.identifier)) + class InOperator(EqualsOperator): symbol = 'IN' cql_symbol = 'IN' - class Quoter(object): - """ - contains a single value, which will quote itself for CQL insertion statements - """ - def __init__(self, value): - self.value = value - - def __str__(self): - from cql.query import cql_quote as cq - return '(' + ', '.join([cq(v) for v in self.value]) + ')' - - def get_dict(self): - if isinstance(self.value, BaseQueryFunction): - return {self.identifier: self.column.to_database(self.value.get_value())} - else: - try: - values = [v for v in self.value] - except TypeError: - raise QueryException("in operator arguments must be iterable, {} found".format(self.value)) - return {self.identifier: self.Quoter([self.column.to_database(v) for v in self.value])} + QUERY_VALUE_WRAPPER = IterableQueryValue class GreaterThanOperator(QueryOperator): symbol = "GT" @@ -286,9 +281,9 @@ def _validate_where_syntax(self): if not self._allow_filtering: #if the query is not on an indexed field if not any([w.column.index for w in equal_ops]): - if not any([w.column._partition_key for w in equal_ops]) and not token_ops: + if not any([w.column.partition_key for w in equal_ops]) and not token_ops: raise QueryException('Filtering on a clustering key without a partition key is not allowed unless allow_filtering() is called on the querset') - if any(not 
w.column._partition_key for w in token_ops): + if any(not w.column.partition_key for w in token_ops): raise QueryException('The token() function is only supported on the partition key') @@ -314,7 +309,7 @@ def _select_query(self): if self._defer_fields: fields = [f for f in fields if f not in self._defer_fields] elif self._only_fields: - fields = [f for f in fields if f in self._only_fields] + fields = self._only_fields db_fields = [self.model._columns[f].db_field_name for f in fields] qs = ['SELECT {}'.format(', '.join(['"{}"'.format(f) for f in db_fields]))] @@ -449,7 +444,7 @@ def _parse_filter_arg(self, arg): __ :returns: colname, op tuple """ - statement = arg.split('__') + statement = arg.rsplit('__', 1) if len(statement) == 1: return arg, None elif len(statement) == 2: @@ -466,7 +461,10 @@ def filter(self, **kwargs): try: column = self.model._columns[col_name] except KeyError: - raise QueryException("Can't resolve column name: '{}'".format(col_name)) + if col_name == 'pk__token': + column = columns._PartitionKeysToken(self.model) + else: + raise QueryException("Can't resolve column name: '{}'".format(col_name)) #get query operator, or use equals if not supplied operator_class = QueryOperator.get_operator(col_op or 'EQ') @@ -640,6 +638,12 @@ def values_list(self, *fields, **kwargs): clone._flat_values_list = flat return clone + def __eq__(self, q): + return self._where == q._where + + def __ne__(self, q): + return not (self != q) + class DMLQuery(object): """ A query object used for queries performing inserts, updates, or deletes diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index 77b1d73775..02d51f5671 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -117,7 +117,29 @@ def test_meta_data_is_not_inherited(self): """ Test that metadata defined in one class, is not inherited by subclasses """ - + + def 
test_partition_keys(self): + """ + Test compound partition key definition + """ + class ModelWithPartitionKeys(cqlengine.Model): + c1 = cqlengine.Text(primary_key=True) + p1 = cqlengine.Text(partition_key=True) + p2 = cqlengine.Text(partition_key=True) + + cols = ModelWithPartitionKeys._columns + + self.assertTrue(cols['c1'].primary_key) + self.assertFalse(cols['c1'].partition_key) + + self.assertTrue(cols['p1'].primary_key) + self.assertTrue(cols['p1'].partition_key) + self.assertTrue(cols['p2'].primary_key) + self.assertTrue(cols['p2'].partition_key) + + obj = ModelWithPartitionKeys(p1='a', p2='b') + self.assertEquals(obj.pk, ('a', 'b')) + class TestManualTableNaming(BaseCassEngTestCase): class RenamedTest(cqlengine.Model): diff --git a/cqlengine/tests/model/test_clustering_order.py b/cqlengine/tests/model/test_clustering_order.py new file mode 100644 index 0000000000..92d8e067e8 --- /dev/null +++ b/cqlengine/tests/model/test_clustering_order.py @@ -0,0 +1,35 @@ +import random +from cqlengine.tests.base import BaseCassEngTestCase + +from cqlengine.management import create_table +from cqlengine.management import delete_table +from cqlengine.models import Model +from cqlengine import columns + +class TestModel(Model): + id = columns.Integer(primary_key=True) + clustering_key = columns.Integer(primary_key=True, clustering_order='desc') + +class TestClusteringOrder(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestClusteringOrder, cls).setUpClass() + create_table(TestModel) + + @classmethod + def tearDownClass(cls): + super(TestClusteringOrder, cls).tearDownClass() + delete_table(TestModel) + + def test_clustering_order(self): + """ + Tests that models can be saved and retrieved + """ + items = list(range(20)) + random.shuffle(items) + for i in items: + TestModel.create(id=1, clustering_key=i) + + values = list(TestModel.objects.values_list('clustering_key', flat=True)) + self.assertEquals(values, sorted(items, reverse=True)) diff --git 
a/cqlengine/tests/query/test_queryoperators.py b/cqlengine/tests/query/test_queryoperators.py index 5db4a299af..8f5243fbab 100644 --- a/cqlengine/tests/query/test_queryoperators.py +++ b/cqlengine/tests/query/test_queryoperators.py @@ -17,7 +17,7 @@ def test_maxtimeuuid_function(self): col.set_column_name('time') qry = query.EqualsOperator(col, functions.MaxTimeUUID(now)) - assert qry.cql == '"time" = MaxTimeUUID(:{})'.format(qry.identifier) + assert qry.cql == '"time" = MaxTimeUUID(:{})'.format(qry.value.identifier) def test_mintimeuuid_function(self): """ @@ -28,7 +28,23 @@ def test_mintimeuuid_function(self): col.set_column_name('time') qry = query.EqualsOperator(col, functions.MinTimeUUID(now)) - assert qry.cql == '"time" = MinTimeUUID(:{})'.format(qry.identifier) + assert qry.cql == '"time" = MinTimeUUID(:{})'.format(qry.value.identifier) + def test_token_function(self): + class TestModel(Model): + p1 = columns.Text(partition_key=True) + p2 = columns.Text(partition_key=True) + + func = functions.Token('a', 'b') + + q = TestModel.objects.filter(pk__token__gt=func) + self.assertEquals(q._where[0].cql, 'token("p1", "p2") > token(:{}, :{})'.format(*func.identifier)) + + # Token(tuple()) is also possible for convinience + # it (allows for Token(obj.pk) syntax) + func = functions.Token(('a', 'b')) + + q = TestModel.objects.filter(pk__token__gt=func) + self.assertEquals(q._where[0].cql, 'token("p1", "p2") > token(:{}, :{})'.format(*func.identifier)) diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index a77d750299..79ca5af0d3 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -64,12 +64,12 @@ def test_where_clause_generation(self): Tests the where clause creation """ query1 = TestModel.objects(test_id=5) - ids = [o.identifier for o in query1._where] + ids = [o.query_value.identifier for o in query1._where] where = query1._where_clause() assert where == '"test_id" = 
:{}'.format(*ids) query2 = query1.filter(expected_result__gte=1) - ids = [o.identifier for o in query2._where] + ids = [o.query_value.identifier for o in query2._where] where = query2._where_clause() assert where == '"test_id" = :{} AND "expected_result" >= :{}'.format(*ids) @@ -470,5 +470,12 @@ def test_success_case(self): assert q.count() == 8 +class TestValuesList(BaseQuerySetUsage): + def test_values_list(self): + q = TestModel.objects.filter(test_id=0, attempt_id=1) + item = q.values_list('test_id', 'attempt_id', 'description', 'expected_result', 'test_result').first() + assert item == [0, 1, 'try2', 10, 30] + item = q.values_list('expected_result', flat=True).first() + assert item == 10 diff --git a/docs/topics/columns.rst b/docs/topics/columns.rst index 0537e3bccb..0779c4333f 100644 --- a/docs/topics/columns.rst +++ b/docs/topics/columns.rst @@ -145,12 +145,16 @@ Column Options If True, this column is created as a primary key field. A model can have multiple primary keys. Defaults to False. - *In CQL, there are 2 types of primary keys: partition keys and clustering keys. As with CQL, the first primary key is the partition key, and all others are clustering keys.* + *In CQL, there are 2 types of primary keys: partition keys and clustering keys. As with CQL, the first primary key is the partition key, and all others are clustering keys, unless partition keys are specified manually using* :attr:`BaseColumn.partition_key` + + .. attribute:: BaseColumn.partition_key + + If True, this column is created as partition primary key. There may be many partition keys defined, forming *composite partition key* .. attribute:: BaseColumn.index If True, an index will be created for this column. Defaults to False. - + *Note: Indexes can only be created on models with one primary key* .. attribute:: BaseColumn.db_field @@ -165,3 +169,6 @@ Column Options If True, this model cannot be saved without a value defined for this column. Defaults to True. 
Primary key fields cannot have their required fields set to False. + .. attribute:: BaseColumn.clustering_order + + Defines CLUSTERING ORDER for this column (valid choices are "asc" (default) or "desc"). It may be specified only for clustering primary keys - more: http://www.datastax.com/docs/1.2/cql_cli/cql/CREATE_TABLE#using-clustering-order diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 0dd2c19cf8..52eb90613d 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -66,7 +66,10 @@ Column Options :attr:`~cqlengine.columns.BaseColumn.primary_key` If True, this column is created as a primary key field. A model can have multiple primary keys. Defaults to False. - *In CQL, there are 2 types of primary keys: partition keys and clustering keys. As with CQL, the first primary key is the partition key, and all others are clustering keys.* + *In CQL, there are 2 types of primary keys: partition keys and clustering keys. As with CQL, the first primary key is the partition key, and all others are clustering keys, unless partition keys are specified manually using* :attr:`~cqlengine.columns.BaseColumn.partition_key` + + :attr:`~cqlengine.columns.BaseColumn.partition_key` + If True, this column is created as partition primary key. There may be many partition keys defined, forming *composite partition key* :attr:`~cqlengine.columns.BaseColumn.index` If True, an index will be created for this column. Defaults to False. diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index 6733080cfe..8490ce3daf 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -178,6 +178,26 @@ TimeUUID Functions DataStream.filter(time__gt=cqlengine.MinTimeUUID(min_time), time__lt=cqlengine.MaxTimeUUID(max_time)) +Token Function +============== + + Token functon may be used only on special, virtual column pk__token, representing token of partition key (it also works for composite partition keys). 
+ Cassandra orders returned items by value of partition key token, so using cqlengine.Token we can easy paginate through all table rows. + + *Example* + + .. code-block:: python + + class Items(Model): + id = cqlengine.Text(primary_key=True) + data = cqlengine.Bytes() + + query = Items.objects.all().limit(10) + + first_page = list(query); + last = first_page[-1] + next_page = list(query.filter(pk__token__gt=cqlengine.Token(last.pk))) + QuerySets are imutable ====================== @@ -213,6 +233,13 @@ Ordering QuerySets *For instance, given our Automobile model, year is the only column we can order on.* +Values Lists +============ + + There is a special QuerySet's method ``.values_list()`` - when called, QuerySet returns lists of values instead of model instances. It may significantly speedup things with lower memory footprint for large responses. + Each tuple contains the value from the respective field passed into the ``values_list()`` call — so the first item is the first field, etc. 
For example: + + Batch Queries =============== From 97d7e647a304dbfe4ee10539949e7aa12e9e7982 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mariusz=20Kry=C5=84ski?= Date: Mon, 27 May 2013 02:14:28 +0200 Subject: [PATCH 0179/3726] Fix for #41 --- cqlengine/models.py | 7 ++++++- cqlengine/tests/query/test_queryset.py | 8 ++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 61d001578e..3753e7f78c 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -26,6 +26,10 @@ def __get__(self, instance, owner): else: return self.instmethod.__get__(instance, owner) +class QuerySetDescriptor(object): + def __get__(self, obj, model): + return QuerySet(model) + class BaseModel(object): """ The base model class, don't inherit from this, inherit from Model, defined below @@ -34,6 +38,8 @@ class BaseModel(object): class DoesNotExist(QueryException): pass class MultipleObjectsReturned(QueryException): pass + objects = QuerySetDescriptor() + #table names will be generated automatically from it's model and package name #however, you can also define them manually here table_name = None @@ -266,7 +272,6 @@ def _transform_column(col_name, col_obj): #create the class and add a QuerySet to it klass = super(ModelMetaClass, cls).__new__(cls, name, bases, attrs) - klass.objects = QuerySet(klass) return klass diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 5ce12b5ed5..1c3eb95b20 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -479,3 +479,11 @@ def test_values_list(self): item = q.values_list('expected_result', flat=True).first() assert item == 10 +class TestObjectsProperty(BaseQuerySetUsage): + + def test_objects_property_returns_fresh_queryset(self): + assert TestModel.objects._result_cache is None + len(TestModel.objects) # evaluate queryset + assert TestModel.objects._result_cache is None + + From 
f2a401d0541d3eebd4259827404d9827740e9adf Mon Sep 17 00:00:00 2001 From: Kai Lautaportti Date: Mon, 27 May 2013 10:39:26 +0300 Subject: [PATCH 0180/3726] Allow `datetime.date` values with `columns.DateTime`. Adds support for setting a `datetime.date` value to a `DateTime` column. The value will be transparently converted to a `datetime.datetime` with the time components set to zero. --- cqlengine/columns.py | 11 ++++++++++- cqlengine/tests/columns/test_validation.py | 6 ++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 3c9bad45ba..07a3842718 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -183,6 +183,7 @@ def validate(self, value): raise ValidationError('{} is shorter than {} characters'.format(self.column_name, self.min_length)) return value + class Integer(Column): db_type = 'int' @@ -200,20 +201,27 @@ def to_python(self, value): def to_database(self, value): return self.validate(value) + class DateTime(Column): db_type = 'timestamp' + def __init__(self, **kwargs): super(DateTime, self).__init__(**kwargs) def to_python(self, value): if isinstance(value, datetime): return value + elif isinstance(value, date): + return datetime(*(value.timetuple()[:6])) return datetime.utcfromtimestamp(value) def to_database(self, value): value = super(DateTime, self).to_database(value) if not isinstance(value, datetime): - raise ValidationError("'{}' is not a datetime object".format(value)) + if isinstance(value, date): + value = datetime(value.year, value.month, value.day) + else: + raise ValidationError("'{}' is not a datetime object".format(value)) epoch = datetime(1970, 1, 1, tzinfo=value.tzinfo) offset = 0 if epoch.tzinfo: @@ -326,6 +334,7 @@ def __init__(self, value): def __str__(self): raise NotImplementedError + class BaseContainerColumn(Column): """ Base Container type diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 
e323c4653d..24c1739706 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -55,6 +55,12 @@ def dst(self, date_time): dt2 = self.DatetimeTest.objects(test_id=0).first() assert dt2.created_at.timetuple()[:6] == (now + timedelta(hours=1)).timetuple()[:6] + def test_datetime_date_support(self): + today = date.today() + self.DatetimeTest.objects.create(test_id=0, created_at=today) + dt2 = self.DatetimeTest.objects(test_id=0).first() + assert dt2.created_at.isoformat() == datetime(today.year, today.month, today.day).isoformat() + class TestDate(BaseCassEngTestCase): class DateTest(Model): From b1ce5f2f380dad3d45be52510f6d4075aac49e7d Mon Sep 17 00:00:00 2001 From: Kai Lautaportti Date: Mon, 27 May 2013 10:54:54 +0300 Subject: [PATCH 0181/3726] Log the CQL statements at DEBUG level. This logs the CQL statements emitted by cqlengine in the `cqlengine.cql` logger at DEBUG level. For debugging purposes you can enable that logger to help work out problems with your queries. The logger is not enabled by default by `cqlengine`. 
--- cqlengine/columns.py | 3 +++ cqlengine/connection.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 3c9bad45ba..7247f75ca5 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -326,6 +326,9 @@ def __init__(self, value): def __str__(self): raise NotImplementedError + def __repr__(self): + return self.__str__() + class BaseContainerColumn(Column): """ Base Container type diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 2a5ef989eb..c0c2b1710e 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -7,11 +7,13 @@ import random import cql +import logging from cqlengine.exceptions import CQLEngineException from thrift.transport.TTransport import TTransportException +LOG = logging.getLogger('cqlengine.cql') class CQLConnectionError(CQLEngineException): pass @@ -167,6 +169,7 @@ def execute(self, query, params={}): for i in range(len(_hosts)): try: + LOG.debug('{} {}'.format(query, repr(params))) self.cur = self.con.cursor() self.cur.execute(query, params) return self.cur From 26d68711469520791dd8a4659b0e245754490dcf Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 30 May 2013 14:28:47 -0700 Subject: [PATCH 0182/3726] ignoring noseids --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 16029f08ec..6d96b7f030 100644 --- a/.gitignore +++ b/.gitignore @@ -38,3 +38,4 @@ html/ #Mr Developer .mr.developer.cfg +.noseids From 3bb7314c60521a93bfbca14e532e257e6b6d1089 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 31 May 2013 09:34:25 -0700 Subject: [PATCH 0183/3726] adding some docstrings to the function classes --- cqlengine/functions.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/cqlengine/functions.py b/cqlengine/functions.py index 947841808a..5a8bfb548b 100644 --- a/cqlengine/functions.py +++ b/cqlengine/functions.py @@ -36,6 +36,11 @@ class BaseQueryFunction(QueryValue): """ class 
MinTimeUUID(BaseQueryFunction): + """ + return a fake timeuuid corresponding to the smallest possible timeuuid for the given timestamp + + http://cassandra.apache.org/doc/cql3/CQL.html#timeuuidFun + """ _cql_string = 'MinTimeUUID(:{})' @@ -53,6 +58,11 @@ def get_value(self): return long((self.value - epoch).total_seconds() * 1000) class MaxTimeUUID(BaseQueryFunction): + """ + return a fake timeuuid corresponding to the largest possible timeuuid for the given timestamp + + http://cassandra.apache.org/doc/cql3/CQL.html#timeuuidFun + """ _cql_string = 'MaxTimeUUID(:{})' @@ -70,6 +80,11 @@ def get_value(self): return long((self.value - epoch).total_seconds() * 1000) class Token(BaseQueryFunction): + """ + compute the token for a given partition key + + http://cassandra.apache.org/doc/cql3/CQL.html#tokenFun + """ def __init__(self, *values): if len(values) == 1 and isinstance(values[0], (list, tuple)): From 4a5a11fb5d1ff28a106dc2288d159643cf0d1e2a Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 3 Jun 2013 09:51:39 -0700 Subject: [PATCH 0184/3726] removed unused column class --- cqlengine/columns.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 284702dbce..91240e0ecd 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -661,13 +661,6 @@ def get_delete_statement(self, val, prev, ctx): return del_statements -class _PartitionKeys(Column): - class value_manager(BaseValueManager): - pass - - def __init__(self, model): - self.model = model - class _PartitionKeysToken(Column): """ virtual column representing token of partition columns. 
From 7ef3cde72722a81d5d9c5362b6b3273d293325e0 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 3 Jun 2013 09:52:04 -0700 Subject: [PATCH 0185/3726] adding some docstrings and comments --- cqlengine/models.py | 4 +++- cqlengine/query.py | 4 ++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 3753e7f78c..842a05edd1 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -220,6 +220,8 @@ def _transform_column(col_name, col_obj): #TODO: check that the defined columns don't conflict with any of the Model API's existing attributes/methods #transform column definitions for k,v in column_definitions: + # this will mark the first primary key column as a partition + # key, if one hasn't been set already if not has_partition_keys and v.primary_key: v.partition_key = True has_partition_keys = True @@ -234,7 +236,7 @@ def _transform_column(col_name, col_obj): pk_name = partition_keys.keys()[0] attrs['pk'] = attrs[pk_name] else: - # composite partition key case + # composite partition key case, get/set a tuple of values _get = lambda self: tuple(self._values[c].getval() for c in partition_keys.keys()) _set = lambda self, val: tuple(self._values[c].setval(v) for (c, v) in zip(partition_keys.keys(), val)) attrs['pk'] = property(_get, _set) diff --git a/cqlengine/query.py b/cqlengine/query.py index 160eab9b39..8aee97e747 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -405,6 +405,9 @@ def __getitem__(self, s): return self._result_cache[s] def _create_result_constructor(self, names): + """ + Returns a function that will be used to instantiate query results + """ model = self.model db_map = model._db_map if not self._values_list: @@ -632,6 +635,7 @@ def delete(self, columns=[]): con.execute(qs, self._where_values()) def values_list(self, *fields, **kwargs): + """ Instructs the query set to return tuples, not model instance """ flat = kwargs.pop('flat', False) if kwargs: raise 
TypeError('Unexpected keyword arguments to values_list: %s' From e75dfe5ef746323b99050fe30d19a267badfbd4a Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 3 Jun 2013 09:52:40 -0700 Subject: [PATCH 0186/3726] adding some todos --- cqlengine/management.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cqlengine/management.py b/cqlengine/management.py index f5f6a64923..25717aa4c6 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -14,6 +14,7 @@ def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, :param **replication_values: 1.2 only, additional values to ad to the replication data map """ with connection_manager() as con: + #TODO: check system tables instead of using cql thrifteries if not any([name == k.name for k in con.con.client.describe_keyspaces()]): # if name not in [k.name for k in con.con.client.describe_keyspaces()]: try: @@ -55,6 +56,7 @@ def create_table(model, create_missing_keyspace=True): with connection_manager() as con: #check for an existing column family + #TODO: check system tables instead of using cql thrifteries ks_info = con.con.client.describe_keyspace(model._get_keyspace()) if not any([raw_cf_name == cf.name for cf in ks_info.cf_defs]): qs = ['CREATE TABLE {}'.format(cf_name)] From 532f3275191506dab25ed4d5c8f921632c09c909 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 3 Jun 2013 09:55:45 -0700 Subject: [PATCH 0187/3726] adding cassandra docs url to token function docs --- docs/topics/queryset.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index 8490ce3daf..48e26a7cba 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -184,6 +184,8 @@ Token Function Token functon may be used only on special, virtual column pk__token, representing token of partition key (it also works for composite partition keys). 
Cassandra orders returned items by value of partition key token, so using cqlengine.Token we can easy paginate through all table rows. + See http://cassandra.apache.org/doc/cql3/CQL.html#tokenFun + *Example* .. code-block:: python From 55f16442e36ffea5e35a80a5bb1b128c71f6226f Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 3 Jun 2013 10:00:15 -0700 Subject: [PATCH 0188/3726] removing commented out method --- cqlengine/models.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 842a05edd1..d0ea201dac 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -108,11 +108,6 @@ def column_family_name(cls, include_keyspace=True): if not include_keyspace: return cf_name return '{}.{}'.format(cls._get_keyspace(), cf_name) - #@property - #def pk(self): - # """ Returns the object's primary key """ - # return getattr(self, self._pk_name) - def validate(self): """ Cleans and validates the field values """ for name, col in self._columns.items(): From 24eba1bbc0aafc9e503747e688cacfdb88ad2fb8 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 3 Jun 2013 10:11:38 -0700 Subject: [PATCH 0189/3726] adding contributor guidelines --- CONTRIBUTING.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..62f6d786bb --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,12 @@ +### Contributing code to cqlengine + +Before submitting a pull request, please make sure that it follows these guidelines: + +* Limit yourself to one feature or bug fix per pull request. +* Include unittests that thoroughly test the feature/bug fix +* Write clear, descriptive commit messages. 
+* Many granular commits are preferred over large monolithic commits +* If you're adding or modifying features, please update the documentation + +If you're working on a big ticket item, please check in on [cqlengine-users](https://groups.google.com/forum/?fromgroups#!forum/cqlengine-users). +We'd hate to have to steer you in a different direction after you've already put in a lot of hard work. From dc14e6927505bc949a00c60b7b543330660a5d6a Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 3 Jun 2013 10:23:37 -0700 Subject: [PATCH 0190/3726] adding contributor guidelines link to the readme --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index b54711f116..b0037b9465 100644 --- a/README.md +++ b/README.md @@ -74,3 +74,7 @@ example8 >>> print instance.description example5 ``` + +## Contributing + +If you'd like to contribute to cqlengine, please read the [contributor guidelines](https://github.com/bdeggleston/cqlengine/blob/master/CONTRIBUTING.md) From 286bd0e66af0c1c21a961a06fc4e3177c4ecf57c Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 3 Jun 2013 11:33:37 -0700 Subject: [PATCH 0191/3726] adding custom value quoter to boolean column for compatibility with Cassandra 1.2.5+ --- cqlengine/columns.py | 40 +++++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 91240e0ecd..8559795474 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -52,6 +52,20 @@ def get_property(self): else: return property(_get, _set) +class ValueQuoter(object): + """ + contains a single value, which will quote itself for CQL insertion statements + """ + def __init__(self, value): + self.value = value + + def __str__(self): + raise NotImplementedError + + def __repr__(self): + return self.__str__() + + class Column(object): #the cassandra type this column maps to @@ -293,11 +307,16 @@ def __init__(self, **kwargs): class 
Boolean(Column): db_type = 'boolean' + class Quoter(ValueQuoter): + """ Cassandra 1.2.5 is stricter about boolean values """ + def __str__(self): + return 'true' if self.value else 'false' + def to_python(self, value): return bool(value) def to_database(self, value): - return bool(value) + return self.Quoter(bool(value)) class Float(Column): db_type = 'double' @@ -329,19 +348,6 @@ def __init__(self, **kwargs): super(Counter, self).__init__(**kwargs) raise NotImplementedError -class ContainerQuoter(object): - """ - contains a single value, which will quote itself for CQL insertion statements - """ - def __init__(self, value): - self.value = value - - def __str__(self): - raise NotImplementedError - - def __repr__(self): - return self.__str__() - class BaseContainerColumn(Column): """ Base Container type @@ -383,7 +389,7 @@ class Set(BaseContainerColumn): """ db_type = 'set<{}>' - class Quoter(ContainerQuoter): + class Quoter(ValueQuoter): def __str__(self): cq = cql_quote @@ -466,7 +472,7 @@ class List(BaseContainerColumn): """ db_type = 'list<{}>' - class Quoter(ContainerQuoter): + class Quoter(ValueQuoter): def __str__(self): cq = cql_quote @@ -563,7 +569,7 @@ class Map(BaseContainerColumn): db_type = 'map<{}, {}>' - class Quoter(ContainerQuoter): + class Quoter(ValueQuoter): def __str__(self): cq = cql_quote From 24b207c18a785270b5ac90e93b7e774d3d80cc56 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 3 Jun 2013 11:58:22 -0700 Subject: [PATCH 0192/3726] adding None handling to DateTime to_database --- cqlengine/columns.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 8559795474..69ad188a3c 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -236,6 +236,7 @@ def to_python(self, value): def to_database(self, value): value = super(DateTime, self).to_database(value) + if value is None: return if not isinstance(value, datetime): if isinstance(value, date): value = datetime(value.year, 
value.month, value.day) From 40bed98b90d58b980f4086e3901cdab5a4be7b05 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 3 Jun 2013 13:24:53 -0700 Subject: [PATCH 0193/3726] making DoesNotExist & MultipleObjectsReturned distinct classes for each model, and inherit from parent model if any --- cqlengine/models.py | 25 ++++++++++++--- cqlengine/query.py | 4 +++ .../tests/model/test_class_construction.py | 31 +++++++++++++++++++ 3 files changed, 56 insertions(+), 4 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index d0ea201dac..bd4e65fabe 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -3,8 +3,9 @@ from cqlengine import columns from cqlengine.exceptions import ModelException -from cqlengine.functions import BaseQueryFunction -from cqlengine.query import QuerySet, QueryException, DMLQuery +from cqlengine.query import QuerySet, DMLQuery +from cqlengine.query import DoesNotExist as _DoesNotExist +from cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned class ModelDefinitionException(ModelException): pass @@ -35,8 +36,8 @@ class BaseModel(object): The base model class, don't inherit from this, inherit from Model, defined below """ - class DoesNotExist(QueryException): pass - class MultipleObjectsReturned(QueryException): pass + class DoesNotExist(_DoesNotExist): pass + class MultipleObjectsReturned(_MultipleObjectsReturned): pass objects = QuerySetDescriptor() @@ -182,6 +183,7 @@ def __new__(cls, name, bases, attrs): for k,v in getattr(base, '_defined_columns', {}).items(): inherited_columns.setdefault(k,v) + def _transform_column(col_name, col_obj): column_dict[col_name] = col_obj if col_obj.primary_key: @@ -267,6 +269,21 @@ def _transform_column(col_name, col_obj): attrs['_partition_keys'] = partition_keys attrs['_clustering_keys'] = clustering_keys + #setup class exceptions + DoesNotExistBase = None + for base in bases: + DoesNotExistBase = getattr(base, 'DoesNotExist', None) + if DoesNotExistBase is not 
None: break + DoesNotExistBase = DoesNotExistBase or attrs.pop('DoesNotExist', BaseModel.DoesNotExist) + attrs['DoesNotExist'] = type('DoesNotExist', (DoesNotExistBase,), {}) + + MultipleObjectsReturnedBase = None + for base in bases: + MultipleObjectsReturnedBase = getattr(base, 'MultipleObjectsReturned', None) + if MultipleObjectsReturnedBase is not None: break + MultipleObjectsReturnedBase = DoesNotExistBase or attrs.pop('MultipleObjectsReturned', BaseModel.MultipleObjectsReturned) + attrs['MultipleObjectsReturned'] = type('MultipleObjectsReturned', (MultipleObjectsReturnedBase,), {}) + #create the class and add a QuerySet to it klass = super(ModelMetaClass, cls).__new__(cls, name, bases, attrs) return klass diff --git a/cqlengine/query.py b/cqlengine/query.py index 8aee97e747..3ab71129f0 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -14,6 +14,10 @@ #http://www.datastax.com/docs/1.1/references/cql/index class QueryException(CQLEngineException): pass +class DoesNotExist(QueryException): pass +class MultipleObjectsReturned(QueryException): pass + + class QueryOperatorException(QueryException): pass class QueryOperator(object): diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index 36f986508f..c67fcbee4d 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -151,6 +151,37 @@ class DelModel(Model): with self.assertRaises(AttributeError): del model.key + def test_does_not_exist_exceptions_are_not_shared_between_model(self): + """ Tests that DoesNotExist exceptions are not the same exception between models """ + + class Model1(Model): + pass + class Model2(Model): + pass + + try: + raise Model1.DoesNotExist + except Model2.DoesNotExist: + assert False, "Model1 exception should not be caught by Model2" + except Model1.DoesNotExist: + #expected + pass + + def test_does_not_exist_inherits_from_superclass(self): + """ Tests that a 
DoesNotExist exception can be caught by it's parent class DoesNotExist """ + class Model1(Model): + pass + class Model2(Model1): + pass + + try: + raise Model2.DoesNotExist + except Model1.DoesNotExist: + #expected + pass + except Exception: + assert False, "Model2 exception should not be caught by Model1" + class TestManualTableNaming(BaseCassEngTestCase): class RenamedTest(cqlengine.Model): From 1301486d0e223d9feba7453b49fc44ab3a8707c3 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 3 Jun 2013 13:28:26 -0700 Subject: [PATCH 0194/3726] updating changelog --- changelog | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/changelog b/changelog index 3edb19f0fa..05714651e4 100644 --- a/changelog +++ b/changelog @@ -1,6 +1,15 @@ CHANGELOG -0.2.1 (in progress) +0.3 +* added support for Token function (thanks @mrk-its) +* added support for compound partition key (thanks @mrk-its)s +* added support for defining clustering key ordering (thanks @mrk-its) +* added values_list to Query class, bypassing object creation if desired (thanks @mrk-its) +* fixed bug with Model.objects caching values (thanks @mrk-its) +* fixed Cassandra 1.2.5 compatibility bug +* updated model exception inheritance + +0.2.1 * adding support for datetimes with tzinfo (thanks @gdoermann) * fixing bug in saving map updates (thanks @pandu-rao) From f2b9c14e2906fc2110fa19ae1ce852640c5216ad Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 3 Jun 2013 13:37:14 -0700 Subject: [PATCH 0195/3726] version number bump --- cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- setup.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index ed5a820213..c9dc969515 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -3,5 +3,5 @@ from cqlengine.models import Model from cqlengine.query import BatchQuery -__version__ = '0.2' +__version__ = '0.3' diff --git a/docs/conf.py b/docs/conf.py index 
96c7db6ed6..9b7e050a50 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.2' +version = '0.3' # The full version, including alpha/beta/rc tags. -release = '0.2' +release = '0.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index 0f98baecbe..24545a9b7e 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.2' +version = '0.3' long_desc = """ cqlengine is a Cassandra CQL Object Mapper for Python in the style of the Django orm and mongoengine From 5eef9bd18d94ab0c3d898653799707268b4148e7 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 3 Jun 2013 15:46:53 -0700 Subject: [PATCH 0196/3726] adding tests around io for all column types --- cqlengine/tests/columns/test_value_io.py | 136 +++++++++++++++++++++++ 1 file changed, 136 insertions(+) create mode 100644 cqlengine/tests/columns/test_value_io.py diff --git a/cqlengine/tests/columns/test_value_io.py b/cqlengine/tests/columns/test_value_io.py new file mode 100644 index 0000000000..273ff0d56b --- /dev/null +++ b/cqlengine/tests/columns/test_value_io.py @@ -0,0 +1,136 @@ +from datetime import datetime, timedelta +from decimal import Decimal +from uuid import uuid1, uuid4, UUID +from unittest import SkipTest +from cqlengine.tests.base import BaseCassEngTestCase + +from cqlengine.management import create_table +from cqlengine.management import delete_table +from cqlengine.models import Model +from cqlengine import columns + +class BaseColumnIOTest(BaseCassEngTestCase): + + TEST_MODEL = None + TEST_COLUMN = None + + @property + def PKEY_VAL(self): + raise NotImplementedError + + @property + def DATA_VAL(self): + raise NotImplementedError + + @classmethod + def setUpClass(cls): + super(BaseColumnIOTest, cls).setUpClass() + if not cls.TEST_COLUMN: return + class IOTestModel(Model): 
+ table_name = cls.TEST_COLUMN.db_type + "_io_test_model_{}".format(uuid4().hex[:8]) + pkey = cls.TEST_COLUMN(primary_key=True) + data = cls.TEST_COLUMN() + + cls.TEST_MODEL = IOTestModel + create_table(cls.TEST_MODEL) + + #tupleify + if not isinstance(cls.PKEY_VAL, tuple): + cls.PKEY_VAL = cls.PKEY_VAL, + if not isinstance(cls.DATA_VAL, tuple): + cls.DATA_VAL = cls.DATA_VAL, + + @classmethod + def tearDownClass(cls): + super(BaseColumnIOTest, cls).tearDownClass() + if not cls.TEST_COLUMN: return + delete_table(cls.TEST_MODEL) + + def comparator_converter(self, val): + """ If you want to convert the original value used to compare the model vales """ + return val + + def test_column_io(self): + """ Tests the given models class creates and retrieves values as expected """ + if not self.TEST_COLUMN: return + for pkey, data in zip(self.PKEY_VAL, self.DATA_VAL): + #create + m1 = self.TEST_MODEL.create(pkey=pkey, data=data) + + #get + m2 = self.TEST_MODEL.get(pkey=pkey) + assert m1.pkey == m2.pkey == self.comparator_converter(pkey), self.TEST_COLUMN + assert m1.data == m2.data == self.comparator_converter(data), self.TEST_COLUMN + + #delete + self.TEST_MODEL.filter(pkey=pkey).delete() + +class TestTextIO(BaseColumnIOTest): + + TEST_COLUMN = columns.Text + PKEY_VAL = 'bacon' + DATA_VAL = 'monkey' + +class TestInteger(BaseColumnIOTest): + + TEST_COLUMN = columns.Integer + PKEY_VAL = 5 + DATA_VAL = 6 + +class TestDateTime(BaseColumnIOTest): + + TEST_COLUMN = columns.DateTime + now = datetime(*datetime.now().timetuple()[:6]) + PKEY_VAL = now + DATA_VAL = now + timedelta(days=1) + +class TestDate(BaseColumnIOTest): + + TEST_COLUMN = columns.Date + now = datetime.now().date() + PKEY_VAL = now + DATA_VAL = now + timedelta(days=1) + +class TestUUID(BaseColumnIOTest): + + TEST_COLUMN = columns.UUID + + PKEY_VAL = str(uuid4()), uuid4() + DATA_VAL = str(uuid4()), uuid4() + + def comparator_converter(self, val): + return val if isinstance(val, UUID) else UUID(val) + +class 
TestTimeUUID(BaseColumnIOTest): + + TEST_COLUMN = columns.TimeUUID + + PKEY_VAL = str(uuid1()), uuid1() + DATA_VAL = str(uuid1()), uuid1() + + def comparator_converter(self, val): + return val if isinstance(val, UUID) else UUID(val) + +class TestBooleanIO(BaseColumnIOTest): + + TEST_COLUMN = columns.Boolean + + PKEY_VAL = True + DATA_VAL = False + +class TestFloatIO(BaseColumnIOTest): + + TEST_COLUMN = columns.Float + + PKEY_VAL = 3.14 + DATA_VAL = -1982.11 + +class TestDecimalIO(BaseColumnIOTest): + + TEST_COLUMN = columns.Decimal + + PKEY_VAL = Decimal('1.35'), 5, '2.4' + DATA_VAL = Decimal('0.005'), 3.5, '8' + + def comparator_converter(self, val): + return Decimal(val) From 229be07bab4c74e13849f623984fd3fe602414f5 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 3 Jun 2013 15:47:22 -0700 Subject: [PATCH 0197/3726] adding some conversion logic to Decimal and UUID columns --- cqlengine/columns.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 69ad188a3c..3f0806345d 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -294,6 +294,12 @@ def validate(self, value): raise ValidationError("{} is not a valid uuid".format(value)) return _UUID(val) + def to_python(self, value): + return self.validate(value) + + def to_database(self, value): + return self.validate(value) + class TimeUUID(UUID): """ UUID containing timestamp @@ -343,6 +349,22 @@ def to_database(self, value): class Decimal(Column): db_type = 'decimal' + def validate(self, value): + from decimal import Decimal as _Decimal + from decimal import InvalidOperation + val = super(Decimal, self).validate(value) + if val is None: return + try: + return _Decimal(val) + except InvalidOperation: + raise ValidationError("'{}' can't be coerced to decimal".format(val)) + + def to_python(self, value): + return self.validate(value) + + def to_database(self, value): + return self.validate(value) + class Counter(Column): #TODO: 
counter field def __init__(self, **kwargs): From 7a4fe9ae78090c2591f0980cf8614833fb924111 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 3 Jun 2013 15:48:17 -0700 Subject: [PATCH 0198/3726] fixing the get_dict method on the time uuid functions --- cqlengine/functions.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cqlengine/functions.py b/cqlengine/functions.py index 5a8bfb548b..eee1fcab5e 100644 --- a/cqlengine/functions.py +++ b/cqlengine/functions.py @@ -57,6 +57,9 @@ def get_value(self): epoch = datetime(1970, 1, 1) return long((self.value - epoch).total_seconds() * 1000) + def get_dict(self, column): + return {self.identifier: self.get_value()} + class MaxTimeUUID(BaseQueryFunction): """ return a fake timeuuid corresponding to the largest possible timeuuid for the given timestamp @@ -79,6 +82,9 @@ def get_value(self): epoch = datetime(1970, 1, 1) return long((self.value - epoch).total_seconds() * 1000) + def get_dict(self, column): + return {self.identifier: self.get_value()} + class Token(BaseQueryFunction): """ compute the token for a given partition key From 45695a9f691357332a60c5ab4749420cce9797f0 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 3 Jun 2013 15:49:15 -0700 Subject: [PATCH 0199/3726] version number bump --- cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- setup.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index c9dc969515..25d51303d5 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -3,5 +3,5 @@ from cqlengine.models import Model from cqlengine.query import BatchQuery -__version__ = '0.3' +__version__ = '0.3.1' diff --git a/docs/conf.py b/docs/conf.py index 9b7e050a50..36e5d084b0 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.3' +version = '0.3.1' # The full version, including alpha/beta/rc tags. 
-release = '0.3' +release = '0.3.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index 24545a9b7e..cec6865dca 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.3' +version = '0.3.1' long_desc = """ cqlengine is a Cassandra CQL Object Mapper for Python in the style of the Django orm and mongoengine From 8597bfac6c41f9f83b3c45b54a1536f6a7457ff1 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 3 Jun 2013 13:39:40 -0700 Subject: [PATCH 0200/3726] adding test files and starting to rework the pool --- cqlengine/connection.py | 2 ++ cqlengine/tests/connections/__init__.py | 0 cqlengine/tests/connections/test_connection_pool.py | 0 3 files changed, 2 insertions(+) create mode 100644 cqlengine/tests/connections/__init__.py create mode 100644 cqlengine/tests/connections/test_connection_pool.py diff --git a/cqlengine/connection.py b/cqlengine/connection.py index c0c2b1710e..1fb7b53bd1 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -122,6 +122,8 @@ def put(cls, conn): def _create_connection(cls): """ Creates a new connection for the connection pool. 
+ + should only return a valid connection that it's actually connected to """ global _hosts global _username diff --git a/cqlengine/tests/connections/__init__.py b/cqlengine/tests/connections/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cqlengine/tests/connections/test_connection_pool.py b/cqlengine/tests/connections/test_connection_pool.py new file mode 100644 index 0000000000..e69de29bb2 From 55331ad07b39bf6f2f164cb12a20fa9379517065 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 3 Jun 2013 13:45:07 -0700 Subject: [PATCH 0201/3726] making ConnectionPool not use a global state --- cqlengine/connection.py | 40 ++++++++++++++++++---------------------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 1fb7b53bd1..e4e2b42dee 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -69,38 +69,35 @@ class ConnectionPool(object): # Connection pool queue _queue = None - @classmethod - def clear(cls): + def __init__(self, hosts): + self._hosts = hosts + self._queue = Queue.Queue(maxsize=_max_connections) + + def clear(self): """ Force the connection pool to be cleared. Will close all internal connections. """ try: - while not cls._queue.empty(): - cls._queue.get().close() + while not self._queue.empty(): + self._queue.get().close() except: pass - @classmethod - def get(cls): + def get(self): """ Returns a usable database connection. Uses the internal queue to determine whether to return an existing connection or to create a new one. 
""" try: - if cls._queue.empty(): - return cls._create_connection() - return cls._queue.get() + if self._queue.empty(): + return self._create_connection() + return self._queue.get() except CQLConnectionError as cqle: raise cqle - except: - if not cls._queue: - cls._queue = Queue.Queue(maxsize=_max_connections) - return cls._create_connection() - @classmethod - def put(cls, conn): + def put(self, conn): """ Returns a connection to the queue freeing it up for other queries to use. @@ -109,17 +106,16 @@ def put(cls, conn): :type conn: connection """ try: - if cls._queue.full(): + if self._queue.full(): conn.close() else: - cls._queue.put(conn) + self._queue.put(conn) except: - if not cls._queue: - cls._queue = Queue.Queue(maxsize=_max_connections) - cls._queue.put(conn) + if not self._queue: + self._queue = + self._queue.put(conn) - @classmethod - def _create_connection(cls): + def _create_connection(self): """ Creates a new connection for the connection pool. From d742345796812722d94c71bab4bc2a423dc6cd62 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 3 Jun 2013 13:48:58 -0700 Subject: [PATCH 0202/3726] setup now creates a global connection pool --- cqlengine/connection.py | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index e4e2b42dee..e18c1b089b 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -18,25 +18,18 @@ class CQLConnectionError(CQLEngineException): pass Host = namedtuple('Host', ['name', 'port']) -_hosts = [] -_host_idx = 0 -_conn= None -_username = None -_password = None + _max_connections = 10 +_connection_pool = None -def setup(hosts, username=None, password=None, max_connections=10, default_keyspace=None, lazy=False): +def setup(hosts, username=None, password=None, max_connections=10, default_keyspace=None, lazy=True): """ Records the hosts and connects to one of them :param hosts: list of hosts, strings in the :, or just """ - 
global _hosts - global _username - global _password global _max_connections - _username = username - _password = password + global _connection_pool _max_connections = max_connections if default_keyspace: @@ -56,11 +49,11 @@ def setup(hosts, username=None, password=None, max_connections=10, default_keysp if not _hosts: raise CQLConnectionError("At least one host required") - random.shuffle(_hosts) + _connection_pool = ConnectionPool(_hosts) if not lazy: - con = ConnectionPool.get() - ConnectionPool.put(con) + con = _connection_pool.get() + _connection_pool.put(con) class ConnectionPool(object): @@ -69,8 +62,11 @@ class ConnectionPool(object): # Connection pool queue _queue = None - def __init__(self, hosts): + def __init__(self, hosts, username, password): self._hosts = hosts + self._username = username + self._password = password + self._queue = Queue.Queue(maxsize=_max_connections) def clear(self): From e6a7934fe30ba89187b30f3c696048105d905d02 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 3 Jun 2013 13:55:30 -0700 Subject: [PATCH 0203/3726] fixing references to removed vars --- cqlengine/connection.py | 25 +++++++------------------ cqlengine/tests/base.py | 6 +++--- 2 files changed, 10 insertions(+), 21 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index e18c1b089b..a1a18df09b 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -22,7 +22,7 @@ class CQLConnectionError(CQLEngineException): pass _max_connections = 10 _connection_pool = None -def setup(hosts, username=None, password=None, max_connections=10, default_keyspace=None, lazy=True): +def setup(hosts, username=None, password=None, max_connections=10, default_keyspace=None): """ Records the hosts and connects to one of them @@ -36,6 +36,7 @@ def setup(hosts, username=None, password=None, max_connections=10, default_keysp from cqlengine import models models.DEFAULT_KEYSPACE = default_keyspace + _hosts = [] for host in hosts: host = host.strip() host = 
host.split(':') @@ -51,10 +52,6 @@ def setup(hosts, username=None, password=None, max_connections=10, default_keysp _connection_pool = ConnectionPool(_hosts) - if not lazy: - con = _connection_pool.get() - _connection_pool.put(con) - class ConnectionPool(object): """Handles pooling of database connections.""" @@ -101,14 +98,10 @@ def put(self, conn): :param conn: The connection to be released :type conn: connection """ - try: - if self._queue.full(): - conn.close() - else: - self._queue.put(conn) - except: - if not self._queue: - self._queue = + + if self._queue.full(): + conn.close() + else: self._queue.put(conn) def _create_connection(self): @@ -117,11 +110,7 @@ def _create_connection(self): should only return a valid connection that it's actually connected to """ - global _hosts - global _username - global _password - - if not _hosts: + if not self._hosts: raise CQLConnectionError("At least one host required") host = _hosts[_host_idx] diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index 64e6a3c2a9..a7090c627d 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -6,13 +6,13 @@ class BaseCassEngTestCase(TestCase): @classmethod def setUpClass(cls): super(BaseCassEngTestCase, cls).setUpClass() - if not connection._hosts: + if not connection._connection_pool: connection.setup(['localhost:9160'], default_keyspace='cqlengine_test') def assertHasAttr(self, obj, attr): - self.assertTrue(hasattr(obj, attr), + self.assertTrue(hasattr(obj, attr), "{} doesn't have attribute: {}".format(obj, attr)) def assertNotHasAttr(self, obj, attr): - self.assertFalse(hasattr(obj, attr), + self.assertFalse(hasattr(obj, attr), "{} shouldn't have the attribute: {}".format(obj, attr)) From 37259b963ee155a17d86ae597a2814e92f818991 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 3 Jun 2013 14:48:44 -0700 Subject: [PATCH 0204/3726] Getting connection pooling working with mocking library Verified throwing exception when no servers are available, but 
correctly recovering and hitting the next server when one is. fixing minor pooling tests ensure we get the right exception back when no servers are available working on tests for retry connections working despite failure Removed old connection_manager and replaced with a simple context manager that allows for easy access to clients within the main pool --- .gitignore | 3 + cqlengine/connection.py | 96 +++++------- cqlengine/management.py | 145 +++++++++--------- cqlengine/query.py | 27 ++-- cqlengine/tests/base.py | 6 +- cqlengine/tests/management/test_management.py | 67 ++++---- cqlengine/tests/query/test_queryset.py | 11 -- 7 files changed, 167 insertions(+), 188 deletions(-) diff --git a/.gitignore b/.gitignore index 6d96b7f030..1e4d8eb344 100644 --- a/.gitignore +++ b/.gitignore @@ -39,3 +39,6 @@ html/ #Mr Developer .mr.developer.cfg .noseids +/commitlog +/data + diff --git a/cqlengine/connection.py b/cqlengine/connection.py index a1a18df09b..a7b700afc3 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -9,8 +9,11 @@ import cql import logging +from copy import copy from cqlengine.exceptions import CQLEngineException +from contextlib import contextmanager + from thrift.transport.TTransport import TTransportException LOG = logging.getLogger('cqlengine.cql') @@ -20,7 +23,9 @@ class CQLConnectionError(CQLEngineException): pass Host = namedtuple('Host', ['name', 'port']) _max_connections = 10 -_connection_pool = None + +# global connection pool +connection_pool = None def setup(hosts, username=None, password=None, max_connections=10, default_keyspace=None): """ @@ -29,7 +34,7 @@ def setup(hosts, username=None, password=None, max_connections=10, default_keysp :param hosts: list of hosts, strings in the :, or just """ global _max_connections - global _connection_pool + global connection_pool _max_connections = max_connections if default_keyspace: @@ -50,16 +55,13 @@ def setup(hosts, username=None, password=None, max_connections=10, default_keysp if 
not _hosts: raise CQLConnectionError("At least one host required") - _connection_pool = ConnectionPool(_hosts) + connection_pool = ConnectionPool(_hosts, username, password) class ConnectionPool(object): """Handles pooling of database connections.""" - # Connection pool queue - _queue = None - - def __init__(self, hosts, username, password): + def __init__(self, hosts, username=None, password=None): self._hosts = hosts self._username = username self._password = password @@ -113,58 +115,40 @@ def _create_connection(self): if not self._hosts: raise CQLConnectionError("At least one host required") - host = _hosts[_host_idx] - - new_conn = cql.connect(host.name, host.port, user=_username, password=_password) - new_conn.set_cql_version('3.0.0') - return new_conn - - -class connection_manager(object): - """ - Connection failure tolerant connection manager. Written to be used in a 'with' block for connection pooling - """ - def __init__(self): - if not _hosts: - raise CQLConnectionError("No connections have been configured, call cqlengine.connection.setup") - self.keyspace = None - self.con = ConnectionPool.get() - self.cur = None + hosts = copy(self._hosts) + random.shuffle(hosts) - def close(self): - if self.cur: self.cur.close() - ConnectionPool.put(self.con) - - def __enter__(self): - return self + for host in hosts: + try: + new_conn = cql.connect(host.name, host.port, user=self._username, password=self._password) + new_conn.set_cql_version('3.0.0') + return new_conn + except Exception as e: + logging.debug("Could not establish connection to {}:{}".format(host.name, host.port)) + pass - def __exit__(self, type, value, traceback): - self.close() + raise CQLConnectionError("Could not connect to any server in cluster") - def execute(self, query, params={}): - """ - Gets a connection from the pool and executes the given query, returns the cursor + def execute(self, query, params): + try: + con = self.get() + cur = con.cursor() + cur.execute(query, params) + self.put(con) 
+ return cur + except cql.ProgrammingError as ex: + raise CQLEngineException(unicode(ex)) + except TTransportException: + pass - if there's a connection problem, this will silently create a new connection pool - from the available hosts, and remove the problematic host from the host list - """ - global _host_idx + raise CQLEngineException("Could not execute query against the cluster") - for i in range(len(_hosts)): - try: - LOG.debug('{} {}'.format(query, repr(params))) - self.cur = self.con.cursor() - self.cur.execute(query, params) - return self.cur - except cql.ProgrammingError as ex: - raise CQLEngineException(unicode(ex)) - except TTransportException: - #TODO: check for other errors raised in the event of a connection / server problem - #move to the next connection and set the connection pool - _host_idx += 1 - _host_idx %= len(_hosts) - self.con.close() - self.con = ConnectionPool._create_connection() - - raise CQLConnectionError("couldn't reach a Cassandra server") +def execute(query, params={}): + return connection_pool.execute(query, params) +@contextmanager +def connection_manager(): + global connection_pool + tmp = connection_pool.get() + yield tmp + connection_pool.put(tmp) diff --git a/cqlengine/management.py b/cqlengine/management.py index 25717aa4c6..feddef2ff1 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -1,6 +1,6 @@ import json -from cqlengine.connection import connection_manager +from cqlengine.connection import connection_manager, execute from cqlengine.exceptions import CQLEngineException def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, durable_writes=True, **replication_values): @@ -15,11 +15,11 @@ def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, """ with connection_manager() as con: #TODO: check system tables instead of using cql thrifteries - if not any([name == k.name for k in con.con.client.describe_keyspaces()]): -# if name not in [k.name for k in 
con.con.client.describe_keyspaces()]: + if not any([name == k.name for k in con.client.describe_keyspaces()]): + # if name not in [k.name for k in con.con.client.describe_keyspaces()]: try: #Try the 1.1 method - con.execute("""CREATE KEYSPACE {} + execute("""CREATE KEYSPACE {} WITH strategy_class = '{}' AND strategy_options:replication_factor={};""".format(name, strategy_class, replication_factor)) except CQLEngineException: @@ -38,12 +38,12 @@ def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, if strategy_class != 'SimpleStrategy': query += " AND DURABLE_WRITES = {}".format('true' if durable_writes else 'false') - con.execute(query) + execute(query) def delete_keyspace(name): with connection_manager() as con: - if name in [k.name for k in con.con.client.describe_keyspaces()]: - con.execute("DROP KEYSPACE {}".format(name)) + if name in [k.name for k in con.client.describe_keyspaces()]: + execute("DROP KEYSPACE {}".format(name)) def create_table(model, create_missing_keyspace=True): #construct query string @@ -55,78 +55,81 @@ def create_table(model, create_missing_keyspace=True): create_keyspace(model._get_keyspace()) with connection_manager() as con: - #check for an existing column family - #TODO: check system tables instead of using cql thrifteries - ks_info = con.con.client.describe_keyspace(model._get_keyspace()) - if not any([raw_cf_name == cf.name for cf in ks_info.cf_defs]): - qs = ['CREATE TABLE {}'.format(cf_name)] - - #add column types - pkeys = [] - ckeys = [] - qtypes = [] - def add_column(col): - s = col.get_column_def() - if col.primary_key: - keys = (pkeys if col.partition_key else ckeys) - keys.append('"{}"'.format(col.db_field_name)) - qtypes.append(s) - for name, col in model._columns.items(): - add_column(col) - - qtypes.append('PRIMARY KEY (({}){})'.format(', '.join(pkeys), ckeys and ', ' + ', '.join(ckeys) or '')) - - qs += ['({})'.format(', '.join(qtypes))] - - with_qs = ['read_repair_chance = 
{}'.format(model.read_repair_chance)] - - _order = ["%s %s" % (c.db_field_name, c.clustering_order or 'ASC') for c in model._clustering_keys.values()] - if _order: - with_qs.append("clustering order by ({})".format(', '.join(_order))) - - # add read_repair_chance - qs += ['WITH {}'.format(' AND '.join(with_qs))] + ks_info = con.client.describe_keyspace(model._get_keyspace()) + + #check for an existing column family + #TODO: check system tables instead of using cql thrifteries + if not any([raw_cf_name == cf.name for cf in ks_info.cf_defs]): + qs = ['CREATE TABLE {}'.format(cf_name)] + + #add column types + pkeys = [] + ckeys = [] + qtypes = [] + def add_column(col): + s = col.get_column_def() + if col.primary_key: + keys = (pkeys if col.partition_key else ckeys) + keys.append('"{}"'.format(col.db_field_name)) + qtypes.append(s) + for name, col in model._columns.items(): + add_column(col) + + qtypes.append('PRIMARY KEY (({}){})'.format(', '.join(pkeys), ckeys and ', ' + ', '.join(ckeys) or '')) + + qs += ['({})'.format(', '.join(qtypes))] + + with_qs = ['read_repair_chance = {}'.format(model.read_repair_chance)] + + _order = ["%s %s" % (c.db_field_name, c.clustering_order or 'ASC') for c in model._clustering_keys.values()] + if _order: + with_qs.append("clustering order by ({})".format(', '.join(_order))) + + # add read_repair_chance + qs += ['WITH {}'.format(' AND '.join(with_qs))] + qs = ' '.join(qs) + + try: + execute(qs) + except CQLEngineException as ex: + # 1.2 doesn't return cf names, so we have to examine the exception + # and ignore if it says the column family already exists + if "Cannot add already existing column family" not in unicode(ex): + raise + + #get existing index names, skip ones that already exist + with connection_manager() as con: + ks_info = con.client.describe_keyspace(model._get_keyspace()) + + cf_defs = [cf for cf in ks_info.cf_defs if cf.name == raw_cf_name] + idx_names = [i.index_name for i in cf_defs[0].column_metadata] if cf_defs else 
[] + idx_names = filter(None, idx_names) + + indexes = [c for n,c in model._columns.items() if c.index] + if indexes: + for column in indexes: + if column.db_index_name in idx_names: continue + qs = ['CREATE INDEX index_{}_{}'.format(raw_cf_name, column.db_field_name)] + qs += ['ON {}'.format(cf_name)] + qs += ['("{}")'.format(column.db_field_name)] qs = ' '.join(qs) try: - con.execute(qs) + execute(qs) except CQLEngineException as ex: # 1.2 doesn't return cf names, so we have to examine the exception - # and ignore if it says the column family already exists - if "Cannot add already existing column family" not in unicode(ex): + # and ignore if it says the index already exists + if "Index already exists" not in unicode(ex): raise - #get existing index names, skip ones that already exist - ks_info = con.con.client.describe_keyspace(model._get_keyspace()) - cf_defs = [cf for cf in ks_info.cf_defs if cf.name == raw_cf_name] - idx_names = [i.index_name for i in cf_defs[0].column_metadata] if cf_defs else [] - idx_names = filter(None, idx_names) - - indexes = [c for n,c in model._columns.items() if c.index] - if indexes: - for column in indexes: - if column.db_index_name in idx_names: continue - qs = ['CREATE INDEX index_{}_{}'.format(raw_cf_name, column.db_field_name)] - qs += ['ON {}'.format(cf_name)] - qs += ['("{}")'.format(column.db_field_name)] - qs = ' '.join(qs) - - try: - con.execute(qs) - except CQLEngineException as ex: - # 1.2 doesn't return cf names, so we have to examine the exception - # and ignore if it says the index already exists - if "Index already exists" not in unicode(ex): - raise - def delete_table(model): cf_name = model.column_family_name() - with connection_manager() as con: - try: - con.execute('drop table {};'.format(cf_name)) - except CQLEngineException as ex: - #don't freak out if the table doesn't exist - if 'Cannot drop non existing column family' not in unicode(ex): - raise + + try: + execute('drop table {};'.format(cf_name)) + except 
CQLEngineException as ex: + #don't freak out if the table doesn't exist + if 'Cannot drop non existing column family' not in unicode(ex): + raise diff --git a/cqlengine/query.py b/cqlengine/query.py index 3ab71129f0..d3ae8352e0 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -6,7 +6,8 @@ from uuid import uuid1 from cqlengine import BaseContainerColumn, BaseValueManager, Map, columns -from cqlengine.connection import connection_manager +from cqlengine.connection import connection_pool, connection_manager, execute + from cqlengine.exceptions import CQLEngineException from cqlengine.functions import QueryValue, Token @@ -193,8 +194,7 @@ def execute(self): query_list.append('APPLY BATCH;') - with connection_manager() as con: - con.execute('\n'.join(query_list), parameters) + execute('\n'.join(query_list), parameters) self.queries = [] @@ -346,8 +346,7 @@ def _execute_query(self): if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") if self._result_cache is None: - self._con = connection_manager() - self._cur = self._con.execute(self._select_query(), self._where_values()) + self._cur = execute(self._select_query(), self._where_values()) self._result_cache = [None]*self._cur.rowcount if self._cur.description: names = [i[0] for i in self._cur.description] @@ -368,7 +367,6 @@ def _fill_result_cache_to_idx(self, idx): #return the connection to the connection pool if we have all objects if self._result_cache and self._result_idx == (len(self._result_cache) - 1): - self._con.close() self._con = None self._cur = None @@ -555,9 +553,8 @@ def count(self): qs = ' '.join(qs) - with connection_manager() as con: - cur = con.execute(qs, self._where_values()) - return cur.fetchone()[0] + cur = execute(qs, self._where_values()) + return cur.fetchone()[0] else: return len(self._result_cache) @@ -635,8 +632,7 @@ def delete(self, columns=[]): if self._batch: self._batch.add_query(qs, self._where_values()) else: - with 
connection_manager() as con: - con.execute(qs, self._where_values()) + execute(qs, self._where_values()) def values_list(self, *fields, **kwargs): """ Instructs the query set to return tuples, not model instance """ @@ -753,8 +749,7 @@ def save(self): if self.batch: self.batch.add_query(qs, query_values) else: - with connection_manager() as con: - con.execute(qs, query_values) + execute(qs, query_values) # delete nulled columns and removed map keys @@ -787,8 +782,7 @@ def save(self): if self.batch: self.batch.add_query(qs, query_values) else: - with connection_manager() as con: - con.execute(qs, query_values) + execute(qs, query_values) def delete(self): """ Deletes one instance """ @@ -809,7 +803,6 @@ def delete(self): if self.batch: self.batch.add_query(qs, field_values) else: - with connection_manager() as con: - con.execute(qs, field_values) + execute(qs, field_values) diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index a7090c627d..4400881111 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -1,13 +1,13 @@ from unittest import TestCase -from cqlengine import connection +from cqlengine import connection class BaseCassEngTestCase(TestCase): @classmethod def setUpClass(cls): super(BaseCassEngTestCase, cls).setUpClass() - if not connection._connection_pool: - connection.setup(['localhost:9160'], default_keyspace='cqlengine_test') + # todo fix + connection.setup(['localhost:9160'], default_keyspace='cqlengine_test') def assertHasAttr(self, obj, attr): self.assertTrue(hasattr(obj, attr), diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index ac542bb8e6..be49199ff6 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -1,44 +1,51 @@ +from cqlengine.exceptions import CQLEngineException from cqlengine.management import create_table, delete_table from cqlengine.tests.base import BaseCassEngTestCase -from 
cqlengine.connection import ConnectionPool +from cqlengine.connection import ConnectionPool, Host -from mock import Mock +from mock import Mock, MagicMock, MagicProxy, patch from cqlengine import management from cqlengine.tests.query.test_queryset import TestModel +from cql.thrifteries import ThriftConnection -class ConnectionPoolTestCase(BaseCassEngTestCase): +class ConnectionPoolFailoverTestCase(BaseCassEngTestCase): """Test cassandra connection pooling.""" def setUp(self): - ConnectionPool.clear() - - def test_should_create_single_connection_on_request(self): - """Should create a single connection on first request""" - result = ConnectionPool.get() - self.assertIsNotNone(result) - self.assertEquals(0, ConnectionPool._queue.qsize()) - ConnectionPool._queue.put(result) - self.assertEquals(1, ConnectionPool._queue.qsize()) - - def test_should_close_connection_if_queue_is_full(self): - """Should close additional connections if queue is full""" - connections = [ConnectionPool.get() for x in range(10)] - for conn in connections: - ConnectionPool.put(conn) - fake_conn = Mock() - ConnectionPool.put(fake_conn) - fake_conn.close.assert_called_once_with() - - def test_should_pop_connections_from_queue(self): - """Should pull existing connections off of the queue""" - conn = ConnectionPool.get() - ConnectionPool.put(conn) - self.assertEquals(1, ConnectionPool._queue.qsize()) - self.assertEquals(conn, ConnectionPool.get()) - self.assertEquals(0, ConnectionPool._queue.qsize()) - + self.host = Host('127.0.0.1', '9160') + self.pool = ConnectionPool([self.host]) + + def test_totally_dead_pool(self): + # kill the con + with patch('cqlengine.connection.cql.connect') as mock: + mock.side_effect=CQLEngineException + with self.assertRaises(CQLEngineException): + self.pool.execute("select * from system.peers", {}) + + def test_dead_node(self): + self.pool._hosts.append(self.host) + + # cursor mock needed so set_cql_version doesn't crap out + ok_cur = MagicMock() + + ok_conn = 
MagicMock() + ok_conn.return_value = ok_cur + + + returns = [CQLEngineException(), ok_conn] + + def side_effect(*args, **kwargs): + result = returns.pop(0) + if isinstance(result, Exception): + raise result + return result + + with patch('cqlengine.connection.cql.connect') as mock: + mock.side_effect = side_effect + conn = self.pool._create_connection() + class CreateKeyspaceTest(BaseCassEngTestCase): def test_create_succeeeds(self): diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 1c3eb95b20..17954d184a 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -405,18 +405,7 @@ def test_conn_is_returned_after_filling_cache(self): assert q._con is None assert q._cur is None - def test_conn_is_returned_after_queryset_is_garbage_collected(self): - """ Tests that the connection is returned to the connection pool after the queryset is gc'd """ - from cqlengine.connection import ConnectionPool - # The queue size can be 1 if we just run this file's tests - # It will be 2 when we run 'em all - initial_size = ConnectionPool._queue.qsize() - q = TestModel.objects(test_id=0) - v = q[0] - assert ConnectionPool._queue.qsize() == initial_size - 1 - del q - assert ConnectionPool._queue.qsize() == initial_size class TimeUUIDQueryModel(Model): partition = columns.UUID(primary_key=True) From 66548e7f243c75d5160d7bfdd9a2aa06ef6186ce Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 3 Jun 2013 17:30:57 -0700 Subject: [PATCH 0205/3726] test comment --- cqlengine/tests/management/test_management.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index be49199ff6..4e271c58ce 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -25,6 +25,9 @@ def test_totally_dead_pool(self): self.pool.execute("select * from system.peers", {}) def 
test_dead_node(self): + """ + tests that a single dead node doesn't mess up the pool + """ self.pool._hosts.append(self.host) # cursor mock needed so set_cql_version doesn't crap out From f94c61228ce7e1f329685b78f5a894e19f39c5df Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 3 Jun 2013 18:09:11 -0700 Subject: [PATCH 0206/3726] clarifying how the column io tests work --- cqlengine/tests/columns/test_value_io.py | 37 +++++++++++++----------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/cqlengine/tests/columns/test_value_io.py b/cqlengine/tests/columns/test_value_io.py index 273ff0d56b..86621d65bf 100644 --- a/cqlengine/tests/columns/test_value_io.py +++ b/cqlengine/tests/columns/test_value_io.py @@ -1,7 +1,6 @@ from datetime import datetime, timedelta from decimal import Decimal from uuid import uuid1, uuid4, UUID -from unittest import SkipTest from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.management import create_table @@ -10,31 +9,35 @@ from cqlengine import columns class BaseColumnIOTest(BaseCassEngTestCase): + """ Tests that values are come out of cassandra in the format we expect """ - TEST_MODEL = None - TEST_COLUMN = None + # The generated test model is assigned here + _test_model = None - @property - def PKEY_VAL(self): - raise NotImplementedError + # the column we want to test + TEST_COLUMN = None - @property - def DATA_VAL(self): - raise NotImplementedError + # the values we want to test against, you can + # use a single value, or multiple comma separated values + PKEY_VAL = None + DATA_VAL = None @classmethod def setUpClass(cls): super(BaseColumnIOTest, cls).setUpClass() + + #if the test column hasn't been defined, bail out if not cls.TEST_COLUMN: return + + # create a table with the given column class IOTestModel(Model): table_name = cls.TEST_COLUMN.db_type + "_io_test_model_{}".format(uuid4().hex[:8]) pkey = cls.TEST_COLUMN(primary_key=True) data = cls.TEST_COLUMN() + cls._test_model = IOTestModel + 
create_table(cls._test_model) - cls.TEST_MODEL = IOTestModel - create_table(cls.TEST_MODEL) - - #tupleify + #tupleify the tested values if not isinstance(cls.PKEY_VAL, tuple): cls.PKEY_VAL = cls.PKEY_VAL, if not isinstance(cls.DATA_VAL, tuple): @@ -44,7 +47,7 @@ class IOTestModel(Model): def tearDownClass(cls): super(BaseColumnIOTest, cls).tearDownClass() if not cls.TEST_COLUMN: return - delete_table(cls.TEST_MODEL) + delete_table(cls._test_model) def comparator_converter(self, val): """ If you want to convert the original value used to compare the model vales """ @@ -55,15 +58,15 @@ def test_column_io(self): if not self.TEST_COLUMN: return for pkey, data in zip(self.PKEY_VAL, self.DATA_VAL): #create - m1 = self.TEST_MODEL.create(pkey=pkey, data=data) + m1 = self._test_model.create(pkey=pkey, data=data) #get - m2 = self.TEST_MODEL.get(pkey=pkey) + m2 = self._test_model.get(pkey=pkey) assert m1.pkey == m2.pkey == self.comparator_converter(pkey), self.TEST_COLUMN assert m1.data == m2.data == self.comparator_converter(data), self.TEST_COLUMN #delete - self.TEST_MODEL.filter(pkey=pkey).delete() + self._test_model.filter(pkey=pkey).delete() class TestTextIO(BaseColumnIOTest): From f4c7a5bde249dc5d08246a196fec7f664bfcbb5c Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 3 Jun 2013 18:12:46 -0700 Subject: [PATCH 0207/3726] more clarification --- cqlengine/tests/columns/test_value_io.py | 95 +++++++++++++----------- 1 file changed, 51 insertions(+), 44 deletions(-) diff --git a/cqlengine/tests/columns/test_value_io.py b/cqlengine/tests/columns/test_value_io.py index 86621d65bf..910bf0bd2c 100644 --- a/cqlengine/tests/columns/test_value_io.py +++ b/cqlengine/tests/columns/test_value_io.py @@ -9,44 +9,49 @@ from cqlengine import columns class BaseColumnIOTest(BaseCassEngTestCase): - """ Tests that values are come out of cassandra in the format we expect """ + """ + Tests that values are come out of cassandra in the format we expect + + To test a column type, 
subclass this test, define the column, and the primary key + and data values you want to test + """ # The generated test model is assigned here _test_model = None # the column we want to test - TEST_COLUMN = None + test_column = None # the values we want to test against, you can # use a single value, or multiple comma separated values - PKEY_VAL = None - DATA_VAL = None + pkey_val = None + data_val = None @classmethod def setUpClass(cls): super(BaseColumnIOTest, cls).setUpClass() #if the test column hasn't been defined, bail out - if not cls.TEST_COLUMN: return + if not cls.test_column: return # create a table with the given column class IOTestModel(Model): - table_name = cls.TEST_COLUMN.db_type + "_io_test_model_{}".format(uuid4().hex[:8]) - pkey = cls.TEST_COLUMN(primary_key=True) - data = cls.TEST_COLUMN() + table_name = cls.test_column.db_type + "_io_test_model_{}".format(uuid4().hex[:8]) + pkey = cls.test_column(primary_key=True) + data = cls.test_column() cls._test_model = IOTestModel create_table(cls._test_model) #tupleify the tested values - if not isinstance(cls.PKEY_VAL, tuple): - cls.PKEY_VAL = cls.PKEY_VAL, - if not isinstance(cls.DATA_VAL, tuple): - cls.DATA_VAL = cls.DATA_VAL, + if not isinstance(cls.pkey_val, tuple): + cls.pkey_val = cls.pkey_val, + if not isinstance(cls.data_val, tuple): + cls.data_val = cls.data_val, @classmethod def tearDownClass(cls): super(BaseColumnIOTest, cls).tearDownClass() - if not cls.TEST_COLUMN: return + if not cls.test_column: return delete_table(cls._test_model) def comparator_converter(self, val): @@ -55,85 +60,87 @@ def comparator_converter(self, val): def test_column_io(self): """ Tests the given models class creates and retrieves values as expected """ - if not self.TEST_COLUMN: return - for pkey, data in zip(self.PKEY_VAL, self.DATA_VAL): + if not self.test_column: return + for pkey, data in zip(self.pkey_val, self.data_val): #create m1 = self._test_model.create(pkey=pkey, data=data) #get m2 = 
self._test_model.get(pkey=pkey) - assert m1.pkey == m2.pkey == self.comparator_converter(pkey), self.TEST_COLUMN - assert m1.data == m2.data == self.comparator_converter(data), self.TEST_COLUMN + assert m1.pkey == m2.pkey == self.comparator_converter(pkey), self.test_column + assert m1.data == m2.data == self.comparator_converter(data), self.test_column #delete self._test_model.filter(pkey=pkey).delete() class TestTextIO(BaseColumnIOTest): - TEST_COLUMN = columns.Text - PKEY_VAL = 'bacon' - DATA_VAL = 'monkey' + test_column = columns.Text + pkey_val = 'bacon' + data_val = 'monkey' class TestInteger(BaseColumnIOTest): - TEST_COLUMN = columns.Integer - PKEY_VAL = 5 - DATA_VAL = 6 + test_column = columns.Integer + pkey_val = 5 + data_val = 6 class TestDateTime(BaseColumnIOTest): - TEST_COLUMN = columns.DateTime + test_column = columns.DateTime + now = datetime(*datetime.now().timetuple()[:6]) - PKEY_VAL = now - DATA_VAL = now + timedelta(days=1) + pkey_val = now + data_val = now + timedelta(days=1) class TestDate(BaseColumnIOTest): - TEST_COLUMN = columns.Date + test_column = columns.Date + now = datetime.now().date() - PKEY_VAL = now - DATA_VAL = now + timedelta(days=1) + pkey_val = now + data_val = now + timedelta(days=1) class TestUUID(BaseColumnIOTest): - TEST_COLUMN = columns.UUID + test_column = columns.UUID - PKEY_VAL = str(uuid4()), uuid4() - DATA_VAL = str(uuid4()), uuid4() + pkey_val = str(uuid4()), uuid4() + data_val = str(uuid4()), uuid4() def comparator_converter(self, val): return val if isinstance(val, UUID) else UUID(val) class TestTimeUUID(BaseColumnIOTest): - TEST_COLUMN = columns.TimeUUID + test_column = columns.TimeUUID - PKEY_VAL = str(uuid1()), uuid1() - DATA_VAL = str(uuid1()), uuid1() + pkey_val = str(uuid1()), uuid1() + data_val = str(uuid1()), uuid1() def comparator_converter(self, val): return val if isinstance(val, UUID) else UUID(val) class TestBooleanIO(BaseColumnIOTest): - TEST_COLUMN = columns.Boolean + test_column = columns.Boolean - 
PKEY_VAL = True - DATA_VAL = False + pkey_val = True + data_val = False class TestFloatIO(BaseColumnIOTest): - TEST_COLUMN = columns.Float + test_column = columns.Float - PKEY_VAL = 3.14 - DATA_VAL = -1982.11 + pkey_val = 3.14 + data_val = -1982.11 class TestDecimalIO(BaseColumnIOTest): - TEST_COLUMN = columns.Decimal + test_column = columns.Decimal - PKEY_VAL = Decimal('1.35'), 5, '2.4' - DATA_VAL = Decimal('0.005'), 3.5, '8' + pkey_val = Decimal('1.35'), 5, '2.4' + data_val = Decimal('0.005'), 3.5, '8' def comparator_converter(self, val): return Decimal(val) From 20ba2fcc78e1eecd718190bdc2cef78ed80ac93e Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 4 Jun 2013 13:21:12 -0700 Subject: [PATCH 0208/3726] fixing some attribute names that were upsetting py.test reflection --- cqlengine/tests/columns/test_value_io.py | 51 ++++++++++++------------ 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/cqlengine/tests/columns/test_value_io.py b/cqlengine/tests/columns/test_value_io.py index 910bf0bd2c..fda632e965 100644 --- a/cqlengine/tests/columns/test_value_io.py +++ b/cqlengine/tests/columns/test_value_io.py @@ -1,6 +1,7 @@ from datetime import datetime, timedelta from decimal import Decimal from uuid import uuid1, uuid4, UUID + from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.management import create_table @@ -17,10 +18,10 @@ class BaseColumnIOTest(BaseCassEngTestCase): """ # The generated test model is assigned here - _test_model = None + _generated_model = None # the column we want to test - test_column = None + column = None # the values we want to test against, you can # use a single value, or multiple comma separated values @@ -32,15 +33,15 @@ def setUpClass(cls): super(BaseColumnIOTest, cls).setUpClass() #if the test column hasn't been defined, bail out - if not cls.test_column: return + if not cls.column: return # create a table with the given column class IOTestModel(Model): - table_name = cls.test_column.db_type + 
"_io_test_model_{}".format(uuid4().hex[:8]) - pkey = cls.test_column(primary_key=True) - data = cls.test_column() - cls._test_model = IOTestModel - create_table(cls._test_model) + table_name = cls.column.db_type + "_io_test_model_{}".format(uuid4().hex[:8]) + pkey = cls.column(primary_key=True) + data = cls.column() + cls._generated_model = IOTestModel + create_table(cls._generated_model) #tupleify the tested values if not isinstance(cls.pkey_val, tuple): @@ -51,8 +52,8 @@ class IOTestModel(Model): @classmethod def tearDownClass(cls): super(BaseColumnIOTest, cls).tearDownClass() - if not cls.test_column: return - delete_table(cls._test_model) + if not cls.column: return + delete_table(cls._generated_model) def comparator_converter(self, val): """ If you want to convert the original value used to compare the model vales """ @@ -60,34 +61,34 @@ def comparator_converter(self, val): def test_column_io(self): """ Tests the given models class creates and retrieves values as expected """ - if not self.test_column: return + if not self.column: return for pkey, data in zip(self.pkey_val, self.data_val): #create - m1 = self._test_model.create(pkey=pkey, data=data) + m1 = self._generated_model.create(pkey=pkey, data=data) #get - m2 = self._test_model.get(pkey=pkey) - assert m1.pkey == m2.pkey == self.comparator_converter(pkey), self.test_column - assert m1.data == m2.data == self.comparator_converter(data), self.test_column + m2 = self._generated_model.get(pkey=pkey) + assert m1.pkey == m2.pkey == self.comparator_converter(pkey), self.column + assert m1.data == m2.data == self.comparator_converter(data), self.column #delete - self._test_model.filter(pkey=pkey).delete() + self._generated_model.filter(pkey=pkey).delete() class TestTextIO(BaseColumnIOTest): - test_column = columns.Text + column = columns.Text pkey_val = 'bacon' data_val = 'monkey' class TestInteger(BaseColumnIOTest): - test_column = columns.Integer + column = columns.Integer pkey_val = 5 data_val = 6 class 
TestDateTime(BaseColumnIOTest): - test_column = columns.DateTime + column = columns.DateTime now = datetime(*datetime.now().timetuple()[:6]) pkey_val = now @@ -95,7 +96,7 @@ class TestDateTime(BaseColumnIOTest): class TestDate(BaseColumnIOTest): - test_column = columns.Date + column = columns.Date now = datetime.now().date() pkey_val = now @@ -103,7 +104,7 @@ class TestDate(BaseColumnIOTest): class TestUUID(BaseColumnIOTest): - test_column = columns.UUID + column = columns.UUID pkey_val = str(uuid4()), uuid4() data_val = str(uuid4()), uuid4() @@ -113,7 +114,7 @@ def comparator_converter(self, val): class TestTimeUUID(BaseColumnIOTest): - test_column = columns.TimeUUID + column = columns.TimeUUID pkey_val = str(uuid1()), uuid1() data_val = str(uuid1()), uuid1() @@ -123,21 +124,21 @@ def comparator_converter(self, val): class TestBooleanIO(BaseColumnIOTest): - test_column = columns.Boolean + column = columns.Boolean pkey_val = True data_val = False class TestFloatIO(BaseColumnIOTest): - test_column = columns.Float + column = columns.Float pkey_val = 3.14 data_val = -1982.11 class TestDecimalIO(BaseColumnIOTest): - test_column = columns.Decimal + column = columns.Decimal pkey_val = Decimal('1.35'), 5, '2.4' data_val = Decimal('0.005'), 3.5, '8' From 67055e006e1125446c56c05e07c1bd60afb2b1c8 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 4 Jun 2013 13:40:19 -0700 Subject: [PATCH 0209/3726] fixing bug in uuid string validation --- cqlengine/columns.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 3f0806345d..4f93b34ca9 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -290,9 +290,9 @@ def validate(self, value): if val is None: return from uuid import UUID as _UUID if isinstance(val, _UUID): return val - if not self.re_uuid.match(val): - raise ValidationError("{} is not a valid uuid".format(value)) - return _UUID(val) + if isinstance(val, basestring) and 
self.re_uuid.match(val): + return _UUID(val) + raise ValidationError("{} is not a valid uuid".format(value)) def to_python(self, value): return self.validate(value) From 750c58c27c71e176b27aba8462c18d36e5b764a4 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 4 Jun 2013 13:43:58 -0700 Subject: [PATCH 0210/3726] version bump --- cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- setup.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 25d51303d5..a593cdce5c 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -3,5 +3,5 @@ from cqlengine.models import Model from cqlengine.query import BatchQuery -__version__ = '0.3.1' +__version__ = '0.3.2' diff --git a/docs/conf.py b/docs/conf.py index 36e5d084b0..ecb08cc150 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.3.1' +version = '0.3.2' # The full version, including alpha/beta/rc tags. -release = '0.3.1' +release = '0.3.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/setup.py b/setup.py index cec6865dca..7692b9f853 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.3.1' +version = '0.3.2' long_desc = """ cqlengine is a Cassandra CQL Object Mapper for Python in the style of the Django orm and mongoengine From 634c0e9ae0c8cbf2daa5fed1f85f4d00f41f1da0 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 5 Jun 2013 14:21:58 -0700 Subject: [PATCH 0211/3726] adding support for abstract base classes --- cqlengine/management.py | 4 + cqlengine/models.py | 13 +++- .../tests/model/test_class_construction.py | 74 ++++++++++++++++++- docs/topics/models.rst | 4 + 4 files changed, 91 insertions(+), 4 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index feddef2ff1..90005d2bc2 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -46,6 +46,10 @@ def delete_keyspace(name): execute("DROP KEYSPACE {}".format(name)) def create_table(model, create_missing_keyspace=True): + + if model.__abstract__: + raise CQLEngineException("cannot create table from abstract model") + #construct query string cf_name = model.column_family_name() raw_cf_name = model.column_family_name(include_keyspace=False) diff --git a/cqlengine/models.py b/cqlengine/models.py index bd4e65fabe..99a6c780e0 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -2,7 +2,7 @@ import re from cqlengine import columns -from cqlengine.exceptions import ModelException +from cqlengine.exceptions import ModelException, CQLEngineException from cqlengine.query import QuerySet, DMLQuery from cqlengine.query import DoesNotExist as _DoesNotExist from cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned @@ -29,6 +29,8 @@ def __get__(self, instance, owner): class QuerySetDescriptor(object): def __get__(self, obj, model): + if model.__abstract__: + raise CQLEngineException('cannot execute queries against abstract models') return 
QuerySet(model) class BaseModel(object): @@ -183,6 +185,8 @@ def __new__(cls, name, bases, attrs): for k,v in getattr(base, '_defined_columns', {}).items(): inherited_columns.setdefault(k,v) + #short circuit __abstract__ inheritance + is_abstract = attrs['__abstract__'] = attrs.get('__abstract__', False) def _transform_column(col_name, col_obj): column_dict[col_name] = col_obj @@ -208,7 +212,7 @@ def _transform_column(col_name, col_obj): defined_columns = OrderedDict(column_definitions) #prepend primary key if one hasn't been defined - if not any([v.primary_key for k,v in column_definitions]): + if not is_abstract and not any([v.primary_key for k,v in column_definitions]): k,v = 'id', columns.UUID(primary_key=True) column_definitions = [(k,v)] + column_definitions @@ -228,7 +232,9 @@ def _transform_column(col_name, col_obj): clustering_keys = OrderedDict(k for k in primary_keys.items() if not k[1].partition_key) #setup partition key shortcut - assert partition_keys + if len(partition_keys) == 0: + if not is_abstract: + raise ModelException("at least one partition key must be defined") if len(partition_keys) == 1: pk_name = partition_keys.keys()[0] attrs['pk'] = attrs[pk_name] @@ -294,6 +300,7 @@ class Model(BaseModel): the db name for the column family can be set as the attribute db_name, or it will be genertaed from the class name """ + __abstract__ = True __metaclass__ = ModelMetaClass diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index c67fcbee4d..a37b6baa3c 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -1,6 +1,7 @@ +from cqlengine.query import QueryException from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.exceptions import ModelException +from cqlengine.exceptions import ModelException, CQLEngineException from cqlengine.models import Model from cqlengine import columns import cqlengine @@ -199,8 
+200,79 @@ def test_manual_table_name_is_not_inherited(self): class InheritedTest(self.RenamedTest): pass assert InheritedTest.table_name is None +class AbstractModel(Model): + __abstract__ = True +class ConcreteModel(AbstractModel): + pkey = columns.Integer(primary_key=True) + data = columns.Integer() +class AbstractModelWithCol(Model): + __abstract__ = True + pkey = columns.Integer(primary_key=True) + +class ConcreteModelWithCol(AbstractModelWithCol): + data = columns.Integer() + +class AbstractModelWithFullCols(Model): + __abstract__ = True + pkey = columns.Integer(primary_key=True) + data = columns.Integer() + +class TestAbstractModelClasses(BaseCassEngTestCase): + + def test_id_field_is_not_created(self): + """ Tests that an id field is not automatically generated on abstract classes """ + assert not hasattr(AbstractModel, 'id') + assert not hasattr(AbstractModelWithCol, 'id') + + def test_id_field_is_not_created_on_subclass(self): + assert not hasattr(ConcreteModel, 'id') + + def test_abstract_attribute_is_not_inherited(self): + """ Tests that __abstract__ attribute is not inherited """ + assert not ConcreteModel.__abstract__ + assert not ConcreteModelWithCol.__abstract__ + + def test_attempting_to_save_abstract_model_fails(self): + """ Attempting to save a model from an abstract model should fail """ + with self.assertRaises(CQLEngineException): + AbstractModelWithFullCols.create(pkey=1, data=2) + + def test_attempting_to_create_abstract_table_fails(self): + """ Attempting to create a table from an abstract model should fail """ + from cqlengine.management import create_table + with self.assertRaises(CQLEngineException): + create_table(AbstractModelWithFullCols) + + def test_attempting_query_on_abstract_model_fails(self): + """ Tests attempting to execute query with an abstract model fails """ + with self.assertRaises(CQLEngineException): + iter(AbstractModelWithFullCols.objects(pkey=5)).next() + + def test_abstract_columns_are_inherited(self): + """ Tests 
that columns defined in the abstract class are inherited into the concrete class """ + assert hasattr(ConcreteModelWithCol, 'pkey') + assert isinstance(ConcreteModelWithCol.pkey, property) + assert isinstance(ConcreteModelWithCol._columns['pkey'], columns.Column) + + def test_concrete_class_table_creation_cycle(self): + """ Tests that models with inherited abstract classes can be created, and have io performed """ + from cqlengine.management import create_table, delete_table + create_table(ConcreteModelWithCol) + + w1 = ConcreteModelWithCol.create(pkey=5, data=6) + w2 = ConcreteModelWithCol.create(pkey=6, data=7) + + r1 = ConcreteModelWithCol.get(pkey=5) + r2 = ConcreteModelWithCol.get(pkey=6) + + assert w1.pkey == r1.pkey + assert w1.data == r1.data + assert w2.pkey == r2.pkey + assert w2.data == r2.data + + delete_table(ConcreteModelWithCol) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 52eb90613d..5dc322f61b 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -128,6 +128,10 @@ Model Methods Model Attributes ================ + .. attribute:: Model.__abstract__ + + *Optional.* Indicates that this model is only intended to be used as a base class for other models. You can't create tables for abstract models, but checks around schema validity are skipped during class construction. + .. attribute:: Model.table_name *Optional.* Sets the name of the CQL table for this model. If left blank, the table name will be the name of the model, with it's module name as it's prefix. Manually defined table names are not inherited. 
From fd27adf80ebe64bafb7972d0a674d6ce5fc2f9e3 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 5 Jun 2013 14:25:33 -0700 Subject: [PATCH 0212/3726] updating changelog --- changelog | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/changelog b/changelog index 05714651e4..8111f51f61 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,11 @@ CHANGELOG +0.3.3 +* added abstract base class models + +0.3.2 +* comprehesive rewrite of connection management (thanks @rustyrazorblade) + 0.3 * added support for Token function (thanks @mrk-its) * added support for compound partition key (thanks @mrk-its)s From d6707038df441e6b0e05d7eedcafaa0a59b407f3 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 5 Jun 2013 14:25:40 -0700 Subject: [PATCH 0213/3726] version bump --- cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- setup.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index a593cdce5c..d103644c90 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -3,5 +3,5 @@ from cqlengine.models import Model from cqlengine.query import BatchQuery -__version__ = '0.3.2' +__version__ = '0.3.3' diff --git a/docs/conf.py b/docs/conf.py index ecb08cc150..5d797fee3e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.3.2' +version = '0.3.3' # The full version, including alpha/beta/rc tags. -release = '0.3.2' +release = '0.3.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/setup.py b/setup.py index 7692b9f853..512d55e902 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.3.2' +version = '0.3.3' long_desc = """ cqlengine is a Cassandra CQL Object Mapper for Python in the style of the Django orm and mongoengine From f8654657578cfc09273c8d45c9c829699756b059 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 7 Jun 2013 16:18:59 -0700 Subject: [PATCH 0214/3726] making connection consistency more configurable --- cqlengine/connection.py | 15 +++++++++++---- cqlengine/tests/base.py | 2 +- docs/topics/connection.rst | 11 ++++++++++- 3 files changed, 22 insertions(+), 6 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index a7b700afc3..5010aebc1a 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -27,7 +27,7 @@ class CQLConnectionError(CQLEngineException): pass # global connection pool connection_pool = None -def setup(hosts, username=None, password=None, max_connections=10, default_keyspace=None): +def setup(hosts, username=None, password=None, max_connections=10, default_keyspace=None, consistency='ONE'): """ Records the hosts and connects to one of them @@ -55,16 +55,17 @@ def setup(hosts, username=None, password=None, max_connections=10, default_keysp if not _hosts: raise CQLConnectionError("At least one host required") - connection_pool = ConnectionPool(_hosts, username, password) + connection_pool = ConnectionPool(_hosts, username, password, consistency) class ConnectionPool(object): """Handles pooling of database connections.""" - def __init__(self, hosts, username=None, password=None): + def __init__(self, hosts, username=None, password=None, consistency=None): self._hosts = hosts self._username = username self._password = password + self._consistency = consistency self._queue = Queue.Queue(maxsize=_max_connections) @@ -120,7 +121,13 @@ def _create_connection(self): for host in hosts: try: - 
new_conn = cql.connect(host.name, host.port, user=self._username, password=self._password) + new_conn = cql.connect( + host.name, + host.port, + user=self._username, + password=self._password, + consistency_level=self._consistency + ) new_conn.set_cql_version('3.0.0') return new_conn except Exception as e: diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index 4400881111..126049a860 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -1,5 +1,5 @@ from unittest import TestCase -from cqlengine import connection +from cqlengine import connection class BaseCassEngTestCase(TestCase): diff --git a/docs/topics/connection.rst b/docs/topics/connection.rst index 14bc2341c3..48558f9c46 100644 --- a/docs/topics/connection.rst +++ b/docs/topics/connection.rst @@ -7,11 +7,20 @@ Connection The setup function in `cqlengine.connection` records the Cassandra servers to connect to. If there is a problem with one of the servers, cqlengine will try to connect to each of the other connections before failing. -.. function:: setup(hosts [, username=None, password=None]) +.. 
function:: setup(hosts [, username=None, password=None, consistency='ONE']) :param hosts: list of hosts, strings in the :, or just :type hosts: list + :param username: a username, if required + :type username: str + + :param password: a password, if required + :type password: str + + :param consistency: the consistency level of the connection, defaults to 'ONE' + :type consistency: str + Records the hosts and connects to one of them See the example at :ref:`getting-started` From 226e900fdf5ff85ebd16bb72c4ef37f666f1ad72 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 7 Jun 2013 16:29:28 -0700 Subject: [PATCH 0215/3726] adding some IDE hints to batch and query set descriptors --- cqlengine/models.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/cqlengine/models.py b/cqlengine/models.py index 99a6c780e0..64faf6a4d1 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -27,12 +27,25 @@ def __get__(self, instance, owner): else: return self.instmethod.__get__(instance, owner) + def __call__(self, *args, **kwargs): + """ Just a hint to IDEs that it's ok to call this """ + raise NotImplementedError + class QuerySetDescriptor(object): + """ + returns a fresh queryset for the given model + it's declared on everytime it's accessed + """ + def __get__(self, obj, model): if model.__abstract__: raise CQLEngineException('cannot execute queries against abstract models') return QuerySet(model) + def __call__(self, *args, **kwargs): + """ Just a hint to IDEs that it's ok to call this """ + raise NotImplementedError + class BaseModel(object): """ The base model class, don't inherit from this, inherit from Model, defined below From 35811d1fab64266655ff335c8afbdd5969c332f6 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 7 Jun 2013 17:42:12 -0700 Subject: [PATCH 0216/3726] expanding column documentation --- cqlengine/columns.py | 18 +++++++++++++++--- docs/topics/columns.rst | 2 +- 2 files changed, 16 insertions(+), 4 deletions(-) diff 
--git a/cqlengine/columns.py b/cqlengine/columns.py index 4f93b34ca9..d864780cfc 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -74,14 +74,26 @@ class Column(object): instance_counter = 0 - def __init__(self, primary_key=False, partition_key=False, index=False, db_field=None, default=None, required=True, clustering_order=None): + def __init__(self, + primary_key=False, + partition_key=False, + index=False, + db_field=None, + default=None, + required=True, + clustering_order=None): """ :param primary_key: bool flag, indicates this column is a primary key. The first primary key defined - on a model is the partition key, all others are cluster keys + on a model is the partition key (unless partition keys are set), all others are cluster keys + :param partition_key: indicates that this column should be the partition key, defining + more than one partition key column creates a compound partition key :param index: bool flag, indicates an index should be created for this column :param db_field: the fieldname this field will map to in the database :param default: the default value, can be a value or a callable (no args) - :param required: boolean, is the field required? + :param required: boolean, is the field required? Model validation will raise and + exception if required is set to True and there is a None value assigned + :param clustering_order: only applicable on clustering keys (primary keys that are not partition keys) + determines the order that the clustering keys are sorted on disk """ self.partition_key = partition_key self.primary_key = partition_key or primary_key diff --git a/docs/topics/columns.rst b/docs/topics/columns.rst index 0779c4333f..61252e7f97 100644 --- a/docs/topics/columns.rst +++ b/docs/topics/columns.rst @@ -149,7 +149,7 @@ Column Options .. attribute:: BaseColumn.partition_key - If True, this column is created as partition primary key. 
There may be many partition keys defined, forming *composite partition key* + If True, this column is created as partition primary key. There may be many partition keys defined, forming a *composite partition key* .. attribute:: BaseColumn.index From f6fd5b100cf4d91bd0ba15a6c733183a208eeef7 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 14 Jun 2013 15:47:26 -0700 Subject: [PATCH 0217/3726] beginning work on query builder --- cqlengine/query.py | 60 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/cqlengine/query.py b/cqlengine/query.py index d3ae8352e0..dd511d5939 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -154,6 +154,66 @@ class LessThanOrEqualOperator(QueryOperator): symbol = "LTE" cql_symbol = '<=' +class AbstractColumnDescriptor(object): + """ + exposes cql query operators through pythons + builtin comparator symbols + """ + + def _get_column(self): + raise NotImplementedError + + def __eq__(self, other): + return EqualsOperator(self._get_column(), other) + + def __contains__(self, item): + return InOperator(self._get_column(), item) + + def __gt__(self, other): + return GreaterThanOperator(self._get_column(), other) + + def __ge__(self, other): + return GreaterThanOrEqualOperator(self._get_column(), other) + + def __lt__(self, other): + return LessThanOperator(self._get_column(), other) + + def __le__(self, other): + return LessThanOrEqualOperator(self._get_column(), other) + + +class NamedColumnDescriptor(AbstractColumnDescriptor): + """ describes a named cql column """ + + def __init__(self, name): + self.name = name + +C = NamedColumnDescriptor + +class TableDescriptor(object): + """ describes a cql table """ + + def __init__(self, keyspace, name): + self.keyspace = keyspace + self.name = name + +T = TableDescriptor + +class KeyspaceDescriptor(object): + """ Describes a cql keyspace """ + + def __init__(self, name): + self.name = name + + def table(self, name): + """ + returns a table 
descriptor with the given + name that belongs to this keyspace + """ + return TableDescriptor(self.name, name) + +K = KeyspaceDescriptor + class BatchType(object): Unlogged = 'UNLOGGED' Counter = 'COUNTER' From 4a977005f3569d1a946d7021486c1ba465b14a08 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 14 Jun 2013 17:52:13 -0700 Subject: [PATCH 0218/3726] fixed #67 (validation on keyname typos) #66 (removed defaults on columns) and #57 and removed autoid --- changelog | 4 +++ cqlengine/columns.py | 12 ------- cqlengine/models.py | 24 ++++++++------ cqlengine/tests/columns/test_validation.py | 17 ++++++++-- .../tests/model/test_class_construction.py | 31 ++++++++++++++----- .../tests/model/test_equality_operations.py | 2 ++ cqlengine/tests/model/test_model_io.py | 10 ++++++ 7 files changed, 68 insertions(+), 32 deletions(-) diff --git a/changelog b/changelog index 8111f51f61..3d94f5fb91 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,9 @@ CHANGELOG +0.4.0 +* removed default values from all column types +* explicit primary key is required (automatic id removed) + 0.3.3 * added abstract base class models diff --git a/cqlengine/columns.py b/cqlengine/columns.py index d864780cfc..67112eb059 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -236,9 +236,6 @@ def to_database(self, value): class DateTime(Column): db_type = 'timestamp' - def __init__(self, **kwargs): - super(DateTime, self).__init__(**kwargs) - def to_python(self, value): if isinstance(value, datetime): return value @@ -265,8 +262,6 @@ def to_database(self, value): class Date(Column): db_type = 'timestamp' - def __init__(self, **kwargs): - super(Date, self).__init__(**kwargs) def to_python(self, value): if isinstance(value, datetime): @@ -294,9 +289,6 @@ class UUID(Column): re_uuid = re.compile(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}') - def __init__(self, default=lambda:uuid4(), **kwargs): - super(UUID, self).__init__(default=default, **kwargs) - def validate(self, 
value): val = super(UUID, self).validate(value) if val is None: return @@ -319,10 +311,6 @@ class TimeUUID(UUID): db_type = 'timeuuid' - def __init__(self, **kwargs): - kwargs.setdefault('default', lambda: uuid1()) - super(TimeUUID, self).__init__(**kwargs) - class Boolean(Column): db_type = 'boolean' diff --git a/cqlengine/models.py b/cqlengine/models.py index 64faf6a4d1..7240bf16ca 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -2,7 +2,7 @@ import re from cqlengine import columns -from cqlengine.exceptions import ModelException, CQLEngineException +from cqlengine.exceptions import ModelException, CQLEngineException, ValidationError from cqlengine.query import QuerySet, DMLQuery from cqlengine.query import DoesNotExist as _DoesNotExist from cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned @@ -50,7 +50,7 @@ class BaseModel(object): """ The base model class, don't inherit from this, inherit from Model, defined below """ - + class DoesNotExist(_DoesNotExist): pass class MultipleObjectsReturned(_MultipleObjectsReturned): pass @@ -60,12 +60,17 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass #however, you can also define them manually here table_name = None - #the keyspace for this model + #the keyspace for this model keyspace = None read_repair_chance = 0.1 def __init__(self, **values): self._values = {} + + extra_columns = set(values.keys()) - set(self._columns.keys()) + if extra_columns: + raise ValidationError("Incorrect columns passed: {}".format(extra_columns)) + for name, column in self._columns.items(): value = values.get(name, None) if value is not None: value = column.to_python(value) @@ -111,11 +116,11 @@ def column_family_name(cls, include_keyspace=True): else: camelcase = re.compile(r'([a-z])([A-Z])') ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s) - + module = cls.__module__.split('.') if module: cf_name = ccase(module[-1]) + '_' - + cf_name += 
ccase(cls.__name__) #trim to less than 48 characters or cassandra will complain cf_name = cf_name[-48:] @@ -140,15 +145,15 @@ def as_dict(self): @classmethod def create(cls, **kwargs): return cls.objects.create(**kwargs) - + @classmethod def all(cls): return cls.objects.all() - + @classmethod def filter(cls, **kwargs): return cls.objects.filter(**kwargs) - + @classmethod def get(cls, **kwargs): return cls.objects.get(**kwargs) @@ -226,8 +231,7 @@ def _transform_column(col_name, col_obj): #prepend primary key if one hasn't been defined if not is_abstract and not any([v.primary_key for k,v in column_definitions]): - k,v = 'id', columns.UUID(primary_key=True) - column_definitions = [(k,v)] + column_definitions + raise ModelDefinitionException("At least 1 primary key is required.") has_partition_keys = any(v.partition_key for (k, v) in column_definitions) diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 24c1739706..acbf9099a9 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -3,6 +3,7 @@ from datetime import date from datetime import tzinfo from decimal import Decimal as D +from uuid import uuid4, uuid1 from cqlengine import ValidationError from cqlengine.tests.base import BaseCassEngTestCase @@ -119,7 +120,7 @@ def test_datetime_io(self): class TestTimeUUID(BaseCassEngTestCase): class TimeUUIDTest(Model): test_id = Integer(primary_key=True) - timeuuid = TimeUUID() + timeuuid = TimeUUID(default=uuid1()) @classmethod def setUpClass(cls): @@ -132,6 +133,10 @@ def tearDownClass(cls): delete_table(cls.TimeUUIDTest) def test_timeuuid_io(self): + """ + ensures that + :return: + """ t0 = self.TimeUUIDTest.create(test_id=0) t1 = self.TimeUUIDTest.get(test_id=0) @@ -139,8 +144,8 @@ def test_timeuuid_io(self): class TestInteger(BaseCassEngTestCase): class IntegerTest(Model): - test_id = UUID(primary_key=True) - value = Integer(default=0) + test_id = 
UUID(primary_key=True, default=lambda:uuid4()) + value = Integer(default=0, required=True) def test_default_zero_fields_validate(self): """ Tests that integer columns with a default value of 0 validate """ @@ -190,7 +195,13 @@ def test_type_checking(self): +class TestExtraFieldsRaiseException(BaseCassEngTestCase): + class TestModel(Model): + id = UUID(primary_key=True, default=uuid4) + def test_extra_field(self): + with self.assertRaises(ValidationError): + self.TestModel.create(bacon=5000) diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index a37b6baa3c..3e249be2e4 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -1,8 +1,9 @@ +from uuid import uuid4 from cqlengine.query import QueryException from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.exceptions import ModelException, CQLEngineException -from cqlengine.models import Model +from cqlengine.models import Model, ModelDefinitionException from cqlengine import columns import cqlengine @@ -18,6 +19,7 @@ def test_column_attributes_handled_correctly(self): """ class TestModel(Model): + id = columns.UUID(primary_key=True, default=lambda:uuid4()) text = columns.Text() #check class attibutes @@ -38,6 +40,7 @@ def test_db_map(self): -the db_map allows columns """ class WildDBNames(Model): + id = columns.UUID(primary_key=True, default=lambda:uuid4()) content = columns.Text(db_field='words_and_whatnot') numbers = columns.Integer(db_field='integers_etc') @@ -61,17 +64,26 @@ def test_column_ordering_is_preserved(self): """ class Stuff(Model): + id = columns.UUID(primary_key=True, default=lambda:uuid4()) words = columns.Text() content = columns.Text() numbers = columns.Integer() self.assertEquals(Stuff._columns.keys(), ['id', 'words', 'content', 'numbers']) + def test_exception_raised_when_creating_class_without_pk(self): + with self.assertRaises(ModelDefinitionException): + 
class TestModel(Model): + count = columns.Integer() + text = columns.Text(required=False) + + def test_value_managers_are_keeping_model_instances_isolated(self): """ Tests that instance value managers are isolated from other instances """ class Stuff(Model): + id = columns.UUID(primary_key=True, default=lambda:uuid4()) num = columns.Integer() inst1 = Stuff(num=5) @@ -86,6 +98,7 @@ def test_superclass_fields_are_inherited(self): Tests that fields defined on the super class are inherited properly """ class TestModel(Model): + id = columns.UUID(primary_key=True, default=lambda:uuid4()) text = columns.Text() class InheritedModel(TestModel): @@ -124,6 +137,7 @@ def test_partition_keys(self): Test compound partition key definition """ class ModelWithPartitionKeys(cqlengine.Model): + id = columns.UUID(primary_key=True, default=lambda:uuid4()) c1 = cqlengine.Text(primary_key=True) p1 = cqlengine.Text(partition_key=True) p2 = cqlengine.Text(partition_key=True) @@ -144,6 +158,7 @@ class ModelWithPartitionKeys(cqlengine.Model): def test_del_attribute_is_assigned_properly(self): """ Tests that columns that can be deleted have the del attribute """ class DelModel(Model): + id = columns.UUID(primary_key=True, default=lambda:uuid4()) key = columns.Integer(primary_key=True) data = columns.Integer(required=False) @@ -156,9 +171,10 @@ def test_does_not_exist_exceptions_are_not_shared_between_model(self): """ Tests that DoesNotExist exceptions are not the same exception between models """ class Model1(Model): - pass + id = columns.UUID(primary_key=True, default=lambda:uuid4()) + class Model2(Model): - pass + id = columns.UUID(primary_key=True, default=lambda:uuid4()) try: raise Model1.DoesNotExist @@ -171,7 +187,8 @@ class Model2(Model): def test_does_not_exist_inherits_from_superclass(self): """ Tests that a DoesNotExist exception can be caught by it's parent class DoesNotExist """ class Model1(Model): - pass + id = columns.UUID(primary_key=True, default=lambda:uuid4()) + class 
Model2(Model1): pass @@ -184,14 +201,14 @@ class Model2(Model1): assert False, "Model2 exception should not be caught by Model1" class TestManualTableNaming(BaseCassEngTestCase): - + class RenamedTest(cqlengine.Model): keyspace = 'whatever' table_name = 'manual_name' - + id = cqlengine.UUID(primary_key=True) data = cqlengine.Text() - + def test_proper_table_naming(self): assert self.RenamedTest.column_family_name(include_keyspace=False) == 'manual_name' assert self.RenamedTest.column_family_name(include_keyspace=True) == 'whatever.manual_name' diff --git a/cqlengine/tests/model/test_equality_operations.py b/cqlengine/tests/model/test_equality_operations.py index 4d5b95219b..a3e592b3dd 100644 --- a/cqlengine/tests/model/test_equality_operations.py +++ b/cqlengine/tests/model/test_equality_operations.py @@ -1,4 +1,5 @@ from unittest import skip +from uuid import uuid4 from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.management import create_table @@ -7,6 +8,7 @@ from cqlengine import columns class TestModel(Model): + id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() text = columns.Text(required=False) diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index 08e77ded36..08d41d8b27 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -8,10 +8,18 @@ from cqlengine import columns class TestModel(Model): + id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() text = columns.Text(required=False) a_bool = columns.Boolean(default=False) +class TestModel(Model): + id = columns.UUID(primary_key=True, default=lambda:uuid4()) + count = columns.Integer() + text = columns.Text(required=False) + a_bool = columns.Boolean(default=False) + + class TestModelIO(BaseCassEngTestCase): @classmethod @@ -34,6 +42,8 @@ def test_model_save_and_load(self): for cname in tm._columns.keys(): self.assertEquals(getattr(tm, 
cname), getattr(tm2, cname)) + + def test_model_updating_works_properly(self): """ Tests that subsequent saves after initial model creation work From f254f4da7a4e93e7b757da272a79c59eb01b64ab Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 14 Jun 2013 17:53:00 -0700 Subject: [PATCH 0219/3726] updated changelog --- changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog b/changelog index 3d94f5fb91..f17057b083 100644 --- a/changelog +++ b/changelog @@ -3,6 +3,7 @@ CHANGELOG 0.4.0 * removed default values from all column types * explicit primary key is required (automatic id removed) +* added validation on keyname types on .create() 0.3.3 * added abstract base class models From 23a59e85a2c2c1717417e7ce4f8ff29df117e82d Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 14 Jun 2013 17:53:15 -0700 Subject: [PATCH 0220/3726] changelog update --- changelog | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog b/changelog index f17057b083..f16994fa22 100644 --- a/changelog +++ b/changelog @@ -1,6 +1,6 @@ CHANGELOG -0.4.0 +0.4.0 (in progress) * removed default values from all column types * explicit primary key is required (automatic id removed) * added validation on keyname types on .create() From 30c59e5e8fa7e1cdc70cf0109690a46f44ed9074 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 14 Jun 2013 17:57:42 -0700 Subject: [PATCH 0221/3726] required is false by default #61 --- cqlengine/columns.py | 2 +- cqlengine/tests/columns/test_validation.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 67112eb059..941d2419aa 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -80,7 +80,7 @@ def __init__(self, index=False, db_field=None, default=None, - required=True, + required=False, clustering_order=None): """ :param primary_key: bool flag, indicates this column is a primary key. 
The first primary key defined diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index acbf9099a9..627ccbd3e6 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -185,7 +185,7 @@ def test_type_checking(self): Text().validate(bytearray('bytearray')) with self.assertRaises(ValidationError): - Text().validate(None) + Text(required=True).validate(None) with self.assertRaises(ValidationError): Text().validate(5) From 0476a97a67c433843c0ad8dd2d713f1495648ac2 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 10:06:55 -0700 Subject: [PATCH 0222/3726] adding query method, and expanding the column descriptor --- cqlengine/query.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/cqlengine/query.py b/cqlengine/query.py index dd511d5939..88d16aa42a 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -188,6 +188,13 @@ class NamedColumnDescriptor(AbstractColumnDescriptor): def __init__(self, name): self.name = name + @property + def cql(self): + return self.name + + def to_database(self, val): + return val + C = NamedColumnDescriptor class TableDescriptor(object): @@ -544,6 +551,12 @@ def filter(self, **kwargs): return clone + def query(self, *args): + """ + Same end result as filter, but uses the new comparator style args + ie: Model.column == val + """ + def get(self, **kwargs): """ Returns a single instance matching this query, optionally with additional filter kwargs. 
From c90da56021b2c047e3a294fbc17d211a2aa8c567 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 10:30:02 -0700 Subject: [PATCH 0223/3726] replacing property stuff with ColumnDescriptor instances --- cqlengine/models.py | 62 ++++++++++++++++--- .../tests/model/test_class_construction.py | 2 +- 2 files changed, 54 insertions(+), 10 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 64faf6a4d1..57629260b9 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -1,9 +1,10 @@ from collections import OrderedDict import re +import cqlengine from cqlengine import columns from cqlengine.exceptions import ModelException, CQLEngineException -from cqlengine.query import QuerySet, DMLQuery +from cqlengine.query import QuerySet, DMLQuery, AbstractColumnDescriptor from cqlengine.query import DoesNotExist as _DoesNotExist from cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned @@ -46,6 +47,56 @@ def __call__(self, *args, **kwargs): """ Just a hint to IDEs that it's ok to call this """ raise NotImplementedError +class ColumnDescriptor(AbstractColumnDescriptor): + """ + Handles the reading and writing of column values to and from + a model instance's value manager, as well as creating + comparator queries + """ + + def __init__(self, column): + """ + :param column: + :type column: columns.Column + :return: + """ + self.column = column + + def __get__(self, instance, owner): + """ + Returns either the value or column, depending + on if an instance is provided or not + + :param instance: the model instance + :type instance: Model + """ + + if instance: + return instance._values[self.column.column_name].getval() + else: + return self.column + + def __set__(self, instance, value): + """ + Sets the value on an instance, raises an exception with classes + TODO: use None instance to create update statements + """ + if instance: + return instance._values[self.column.column_name].setval(value) + else: + raise 
AttributeError('cannot reassign column values') + + def __delete__(self, instance): + """ + Sets the column value to None, if possible + """ + if instance: + if self.column.can_delete: + instance._values[self.column.column_name].delval() + else: + raise AttributeError('cannot delete {} columns'.format(self.column.column_name)) + + class BaseModel(object): """ The base model class, don't inherit from this, inherit from Model, defined below @@ -180,7 +231,6 @@ def _inst_batch(self, batch): batch = hybrid_classmethod(_class_batch, _inst_batch) - class ModelMetaClass(type): def __new__(cls, name, bases, attrs): @@ -207,13 +257,7 @@ def _transform_column(col_name, col_obj): primary_keys[col_name] = col_obj col_obj.set_column_name(col_name) #set properties - _get = lambda self: self._values[col_name].getval() - _set = lambda self, val: self._values[col_name].setval(val) - _del = lambda self: self._values[col_name].delval() - if col_obj.can_delete: - attrs[col_name] = property(_get, _set, _del) - else: - attrs[col_name] = property(_get, _set) + attrs[col_name] = ColumnDescriptor(col_obj) column_definitions = [(k,v) for k,v in attrs.items() if isinstance(v, columns.Column)] column_definitions = sorted(column_definitions, lambda x,y: cmp(x[1].position, y[1].position)) diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index a37b6baa3c..0739fe467f 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -253,7 +253,7 @@ def test_attempting_query_on_abstract_model_fails(self): def test_abstract_columns_are_inherited(self): """ Tests that columns defined in the abstract class are inherited into the concrete class """ assert hasattr(ConcreteModelWithCol, 'pkey') - assert isinstance(ConcreteModelWithCol.pkey, property) + assert isinstance(ConcreteModelWithCol.pkey, columns.Column) assert isinstance(ConcreteModelWithCol._columns['pkey'], columns.Column) def 
test_concrete_class_table_creation_cycle(self): From 36082ee04a20fb250ad9e0217c507d8e1186eb89 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 10:34:36 -0700 Subject: [PATCH 0224/3726] updating changelog --- changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog b/changelog index f16994fa22..a697b88734 100644 --- a/changelog +++ b/changelog @@ -4,6 +4,7 @@ CHANGELOG * removed default values from all column types * explicit primary key is required (automatic id removed) * added validation on keyname types on .create() +* changed internal implementation of model value get/set 0.3.3 * added abstract base class models From 4d21458264e239501c4aa284584050a8dc79cb58 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 11:22:12 -0700 Subject: [PATCH 0225/3726] expanding descriptor documentation --- cqlengine/models.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 4875d0b3c2..cf070cd644 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -29,7 +29,9 @@ def __get__(self, instance, owner): return self.instmethod.__get__(instance, owner) def __call__(self, *args, **kwargs): - """ Just a hint to IDEs that it's ok to call this """ + """ + Just a hint to IDEs that it's ok to call this + """ raise NotImplementedError class QuerySetDescriptor(object): @@ -39,12 +41,18 @@ class QuerySetDescriptor(object): """ def __get__(self, obj, model): + """ :rtype: QuerySet """ if model.__abstract__: raise CQLEngineException('cannot execute queries against abstract models') return QuerySet(model) def __call__(self, *args, **kwargs): - """ Just a hint to IDEs that it's ok to call this """ + """ + Just a hint to IDEs that it's ok to call this + + :rtype: QuerySet + """ + raise NotImplementedError raise NotImplementedError class ColumnDescriptor(AbstractColumnDescriptor): From a9c19d3120d2752985aad4ae7f83e6e8dcbc1829 Mon Sep 17 00:00:00 2001 From: Blake 
Eggleston Date: Sat, 15 Jun 2013 11:26:27 -0700 Subject: [PATCH 0226/3726] implementing query method, and mode query method descriptor --- cqlengine/models.py | 19 +++++++++++++++++++ cqlengine/query.py | 16 ++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/cqlengine/models.py b/cqlengine/models.py index cf070cd644..bfa66b871c 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -53,6 +53,25 @@ def __call__(self, *args, **kwargs): :rtype: QuerySet """ raise NotImplementedError + +class QueryExpressionDescriptor(object): + """ + returns a fresh queryset /query method for the given model + it's declared on everytime it's accessed + """ + + def __get__(self, obj, model): + """ :rtype: QuerySet """ + if model.__abstract__: + raise CQLEngineException('cannot execute queries against abstract models') + return QuerySet(model).query + + def __call__(self, *args, **kwargs): + """ + Just a hint to IDEs that it's ok to call this + + :rtype: QuerySet + """ raise NotImplementedError class ColumnDescriptor(AbstractColumnDescriptor): diff --git a/cqlengine/query.py b/cqlengine/query.py index 88d16aa42a..95b6fbc35b 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -530,6 +530,13 @@ def _parse_filter_arg(self, arg): raise QueryException("Can't parse '{}'".format(arg)) def filter(self, **kwargs): + """ + Adds WHERE arguments to the queryset, returning a new queryset + + #TODO: show examples + + :rtype: QuerySet + """ #add arguments to the where clause filters clone = copy.deepcopy(self) for arg, val in kwargs.items(): @@ -555,7 +562,16 @@ def query(self, *args): """ Same end result as filter, but uses the new comparator style args ie: Model.column == val + + #TODO: show examples + + :rtype: QuerySet """ + clone = copy.deepcopy(self) + for operator in args: + clone._where.append(operator) + + return clone def get(self, **kwargs): """ From 1e6531afda1ba055de09388e0a2a0bfb725dd57d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 
11:27:36 -0700 Subject: [PATCH 0227/3726] moving column query expression evaluator out of column descriptor --- cqlengine/models.py | 14 ++++++++++++-- cqlengine/tests/model/test_class_construction.py | 4 ++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index bfa66b871c..808938e067 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -74,7 +74,15 @@ def __call__(self, *args, **kwargs): """ raise NotImplementedError -class ColumnDescriptor(AbstractColumnDescriptor): +class ColumnQueryEvaluator(AbstractColumnDescriptor): + + def __init__(self, column): + self.column = column + + def _get_column(self): + return self.column + +class ColumnDescriptor(object): """ Handles the reading and writing of column values to and from a model instance's value manager, as well as creating @@ -88,6 +96,7 @@ def __init__(self, column): :return: """ self.column = column + self.query_evaluator = ColumnQueryEvaluator(self.column) def __get__(self, instance, owner): """ @@ -101,7 +110,7 @@ def __get__(self, instance, owner): if instance: return instance._values[self.column.column_name].getval() else: - return self.column + return self.query_evaluator def __set__(self, instance, value): """ @@ -133,6 +142,7 @@ class DoesNotExist(_DoesNotExist): pass class MultipleObjectsReturned(_MultipleObjectsReturned): pass objects = QuerySetDescriptor() + query = QueryExpressionDescriptor() #table names will be generated automatically from it's model and package name #however, you can also define them manually here diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index 155fc17e1d..25cd590604 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -3,7 +3,7 @@ from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.exceptions import ModelException, CQLEngineException -from cqlengine.models import 
Model, ModelDefinitionException +from cqlengine.models import Model, ModelDefinitionException, ColumnQueryEvaluator from cqlengine import columns import cqlengine @@ -270,7 +270,7 @@ def test_attempting_query_on_abstract_model_fails(self): def test_abstract_columns_are_inherited(self): """ Tests that columns defined in the abstract class are inherited into the concrete class """ assert hasattr(ConcreteModelWithCol, 'pkey') - assert isinstance(ConcreteModelWithCol.pkey, columns.Column) + assert isinstance(ConcreteModelWithCol.pkey, ColumnQueryEvaluator) assert isinstance(ConcreteModelWithCol._columns['pkey'], columns.Column) def test_concrete_class_table_creation_cycle(self): From c3c21fa1d4b5459617292710dfb083ac38584cda Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 11:27:56 -0700 Subject: [PATCH 0228/3726] adding test around query expressions --- cqlengine/tests/query/test_queryset.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 17954d184a..549eec6869 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -52,6 +52,22 @@ def test_query_filter_parsing(self): assert isinstance(op, query.GreaterThanOrEqualOperator) assert op.value == 1 + def test_query_expression_parsing(self): + """ Tests that query experessions are evaluated properly """ + query1 = TestModel.query(TestModel.test_id == 5) + assert len(query1._where) == 1 + + op = query1._where[0] + assert isinstance(op, query.EqualsOperator) + assert op.value == 5 + + query2 = query1.query(TestModel.expected_result >= 1) + assert len(query2._where) == 2 + + op = query2._where[1] + assert isinstance(op, query.GreaterThanOrEqualOperator) + assert op.value == 1 + def test_using_invalid_column_names_in_filter_kwargs_raises_error(self): """ Tests that using invalid or nonexistant column names for filter args raises an error From 
0d251d170a1a2be5d8d76a0746602601de3f938a Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 11:33:02 -0700 Subject: [PATCH 0229/3726] adding query method argument validation and supporting tests --- cqlengine/query.py | 2 ++ cqlengine/tests/query/test_queryset.py | 16 +++++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 95b6fbc35b..efeb40e0ca 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -569,6 +569,8 @@ def query(self, *args): """ clone = copy.deepcopy(self) for operator in args: + if not isinstance(operator, QueryOperator): + raise QueryException('{} is not a valid query operator'.format(operator)) clone._where.append(operator) return clone diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 549eec6869..7208075ea2 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -73,7 +73,21 @@ def test_using_invalid_column_names_in_filter_kwargs_raises_error(self): Tests that using invalid or nonexistant column names for filter args raises an error """ with self.assertRaises(query.QueryException): - query0 = TestModel.objects(nonsense=5) + TestModel.objects(nonsense=5) + + def test_using_nonexistant_column_names_in_query_args_raises_error(self): + """ + Tests that using invalid or nonexistant columns for query args raises an error + """ + with self.assertRaises(AttributeError): + TestModel.query(TestModel.nonsense == 5) + + def test_using_non_query_operators_in_query_args_raises_error(self): + """ + Tests that providing query args that are not query operator instances raises an error + """ + with self.assertRaises(query.QueryException): + TestModel.query(5) def test_where_clause_generation(self): """ From 286da25d9aa8d7528217a4c7497cc3dbcd4111d1 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 11:36:43 -0700 Subject: [PATCH 0230/3726] adding more tests around 
query method behavior --- cqlengine/tests/query/test_queryset.py | 27 +++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 7208075ea2..3d9cca8abb 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -89,7 +89,7 @@ def test_using_non_query_operators_in_query_args_raises_error(self): with self.assertRaises(query.QueryException): TestModel.query(5) - def test_where_clause_generation(self): + def test_filter_method_where_clause_generation(self): """ Tests the where clause creation """ @@ -103,6 +103,19 @@ def test_where_clause_generation(self): where = query2._where_clause() assert where == '"test_id" = :{} AND "expected_result" >= :{}'.format(*ids) + def test_query_method_where_clause_generation(self): + """ + Tests the where clause creation + """ + query1 = TestModel.query(TestModel.test_id == 5) + ids = [o.query_value.identifier for o in query1._where] + where = query1._where_clause() + assert where == '"test_id" = :{}'.format(*ids) + + query2 = query1.query(TestModel.expected_result >= 1) + ids = [o.query_value.identifier for o in query2._where] + where = query2._where_clause() + assert where == '"test_id" = :{} AND "expected_result" >= :{}'.format(*ids) def test_querystring_generation(self): """ @@ -118,6 +131,18 @@ def test_queryset_is_immutable(self): query2 = query1.filter(expected_result__gte=1) assert len(query2._where) == 2 + assert len(query1._where) == 1 + + def test_querymethod_queryset_is_immutable(self): + """ + Tests that calling a queryset function that changes it's state returns a new queryset + """ + query1 = TestModel.query(TestModel.test_id == 5) + assert len(query1._where) == 1 + + query2 = query1.query(TestModel.expected_result >= 1) + assert len(query2._where) == 2 + assert len(query1._where) == 1 def test_the_all_method_duplicates_queryset(self): """ From 
83d904f58f76dd628f1c97392dd353bd3c7cdc22 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 11:45:11 -0700 Subject: [PATCH 0231/3726] adding additional tests around query method --- cqlengine/tests/query/test_queryset.py | 63 ++++++++++++++++++-------- 1 file changed, 43 insertions(+), 20 deletions(-) diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 3d9cca8abb..230c1dd06f 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -218,12 +218,21 @@ def tearDownClass(cls): class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage): def test_count(self): + """ Tests that adding filtering statements affects the count query as expected """ assert TestModel.objects.count() == 12 q = TestModel.objects(test_id=0) assert q.count() == 4 + def test_query_method_count(self): + """ Tests that adding query statements affects the count query as expected """ + assert TestModel.objects.count() == 12 + + q = TestModel.query(TestModel.test_id == 0) + assert q.count() == 4 + def test_iteration(self): + """ Tests that iterating over a query set pulls back all of the expected results """ q = TestModel.objects(test_id=0) #tuple of expected attempt_id, expected_result values compare_set = set([(0,5), (1,10), (2,15), (3,20)]) @@ -233,6 +242,7 @@ def test_iteration(self): compare_set.remove(val) assert len(compare_set) == 0 + # test with regular filtering q = TestModel.objects(attempt_id=3).allow_filtering() assert len(q) == 3 #tuple of expected test_id, expected_result values @@ -243,36 +253,49 @@ def test_iteration(self): compare_set.remove(val) assert len(compare_set) == 0 - def test_multiple_iterations_work_properly(self): - """ Tests that iterating over a query set more than once works """ - q = TestModel.objects(test_id=0) - #tuple of expected attempt_id, expected_result values - compare_set = set([(0,5), (1,10), (2,15), (3,20)]) + # test with query method + q = 
TestModel.query(TestModel.attempt_id == 3).allow_filtering() + assert len(q) == 3 + #tuple of expected test_id, expected_result values + compare_set = set([(0,20), (1,20), (2,75)]) for t in q: - val = t.attempt_id, t.expected_result + val = t.test_id, t.expected_result assert val in compare_set compare_set.remove(val) assert len(compare_set) == 0 - #try it again - compare_set = set([(0,5), (1,10), (2,15), (3,20)]) - for t in q: - val = t.attempt_id, t.expected_result - assert val in compare_set - compare_set.remove(val) - assert len(compare_set) == 0 + def test_multiple_iterations_work_properly(self): + """ Tests that iterating over a query set more than once works """ + # test with both the filtering method and the query method + for q in (TestModel.objects(test_id=0), TestModel.query(TestModel.test_id==0)): + #tuple of expected attempt_id, expected_result values + compare_set = set([(0,5), (1,10), (2,15), (3,20)]) + for t in q: + val = t.attempt_id, t.expected_result + assert val in compare_set + compare_set.remove(val) + assert len(compare_set) == 0 + + #try it again + compare_set = set([(0,5), (1,10), (2,15), (3,20)]) + for t in q: + val = t.attempt_id, t.expected_result + assert val in compare_set + compare_set.remove(val) + assert len(compare_set) == 0 def test_multiple_iterators_are_isolated(self): """ tests that the use of one iterator does not affect the behavior of another """ - q = TestModel.objects(test_id=0).order_by('attempt_id') - expected_order = [0,1,2,3] - iter1 = iter(q) - iter2 = iter(q) - for attempt_id in expected_order: - assert iter1.next().attempt_id == attempt_id - assert iter2.next().attempt_id == attempt_id + for q in (TestModel.objects(test_id=0), TestModel.query(TestModel.test_id==0)): + q = q.order_by('attempt_id') + expected_order = [0,1,2,3] + iter1 = iter(q) + iter2 = iter(q) + for attempt_id in expected_order: + assert iter1.next().attempt_id == attempt_id + assert iter2.next().attempt_id == attempt_id def 
test_get_success_case(self): """ From b6701db99ed17e15c3e26dcd82fb061ca3dfbf29 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 11:54:23 -0700 Subject: [PATCH 0232/3726] rolling all query types into the filter method and removing the query method --- cqlengine/models.py | 29 ++++------------------- cqlengine/query.py | 32 ++++++++------------------ cqlengine/tests/query/test_queryset.py | 24 +++++++++---------- 3 files changed, 27 insertions(+), 58 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 808938e067..afc2678ddd 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -34,6 +34,7 @@ def __call__(self, *args, **kwargs): """ raise NotImplementedError + class QuerySetDescriptor(object): """ returns a fresh queryset for the given model @@ -54,25 +55,6 @@ def __call__(self, *args, **kwargs): """ raise NotImplementedError -class QueryExpressionDescriptor(object): - """ - returns a fresh queryset /query method for the given model - it's declared on everytime it's accessed - """ - - def __get__(self, obj, model): - """ :rtype: QuerySet """ - if model.__abstract__: - raise CQLEngineException('cannot execute queries against abstract models') - return QuerySet(model).query - - def __call__(self, *args, **kwargs): - """ - Just a hint to IDEs that it's ok to call this - - :rtype: QuerySet - """ - raise NotImplementedError class ColumnQueryEvaluator(AbstractColumnDescriptor): @@ -142,7 +124,6 @@ class DoesNotExist(_DoesNotExist): pass class MultipleObjectsReturned(_MultipleObjectsReturned): pass objects = QuerySetDescriptor() - query = QueryExpressionDescriptor() #table names will be generated automatically from it's model and package name #however, you can also define them manually here @@ -239,12 +220,12 @@ def all(cls): return cls.objects.all() @classmethod - def filter(cls, **kwargs): - return cls.objects.filter(**kwargs) + def filter(cls, *args, **kwargs): + return cls.objects.filter(*args, **kwargs) @classmethod - 
def get(cls, **kwargs): - return cls.objects.get(**kwargs) + def get(cls, *args, **kwargs): + return cls.objects.get(*args, **kwargs) def save(self): is_new = self.pk is None diff --git a/cqlengine/query.py b/cqlengine/query.py index efeb40e0ca..47eabf9ade 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -316,8 +316,8 @@ def __unicode__(self): def __str__(self): return str(self.__unicode__()) - def __call__(self, **kwargs): - return self.filter(**kwargs) + def __call__(self, *args, **kwargs): + return self.filter(*args, **kwargs) def __deepcopy__(self, memo): clone = self.__class__(self.model) @@ -529,7 +529,7 @@ def _parse_filter_arg(self, arg): else: raise QueryException("Can't parse '{}'".format(arg)) - def filter(self, **kwargs): + def filter(self, *args, **kwargs): """ Adds WHERE arguments to the queryset, returning a new queryset @@ -539,6 +539,11 @@ def filter(self, **kwargs): """ #add arguments to the where clause filters clone = copy.deepcopy(self) + for operator in args: + if not isinstance(operator, QueryOperator): + raise QueryException('{} is not a valid query operator'.format(operator)) + clone._where.append(operator) + for arg, val in kwargs.items(): col_name, col_op = self._parse_filter_arg(arg) #resolve column and operator @@ -558,31 +563,14 @@ def filter(self, **kwargs): return clone - def query(self, *args): - """ - Same end result as filter, but uses the new comparator style args - ie: Model.column == val - - #TODO: show examples - - :rtype: QuerySet - """ - clone = copy.deepcopy(self) - for operator in args: - if not isinstance(operator, QueryOperator): - raise QueryException('{} is not a valid query operator'.format(operator)) - clone._where.append(operator) - - return clone - - def get(self, **kwargs): + def get(self, *args, **kwargs): """ Returns a single instance matching this query, optionally with additional filter kwargs. 
A DoesNotExistError will be raised if there are no rows matching the query A MultipleObjectsFoundError will be raised if there is more than one row matching the queyr """ - if kwargs: return self.filter(**kwargs).get() + if kwargs: return self.filter(*args, **kwargs).get() self._execute_query() if len(self._result_cache) == 0: raise self.model.DoesNotExist diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 230c1dd06f..0b68617869 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -54,14 +54,14 @@ def test_query_filter_parsing(self): def test_query_expression_parsing(self): """ Tests that query experessions are evaluated properly """ - query1 = TestModel.query(TestModel.test_id == 5) + query1 = TestModel.filter(TestModel.test_id == 5) assert len(query1._where) == 1 op = query1._where[0] assert isinstance(op, query.EqualsOperator) assert op.value == 5 - query2 = query1.query(TestModel.expected_result >= 1) + query2 = query1.filter(TestModel.expected_result >= 1) assert len(query2._where) == 2 op = query2._where[1] @@ -80,14 +80,14 @@ def test_using_nonexistant_column_names_in_query_args_raises_error(self): Tests that using invalid or nonexistant columns for query args raises an error """ with self.assertRaises(AttributeError): - TestModel.query(TestModel.nonsense == 5) + TestModel.objects(TestModel.nonsense == 5) def test_using_non_query_operators_in_query_args_raises_error(self): """ Tests that providing query args that are not query operator instances raises an error """ with self.assertRaises(query.QueryException): - TestModel.query(5) + TestModel.objects(5) def test_filter_method_where_clause_generation(self): """ @@ -107,12 +107,12 @@ def test_query_method_where_clause_generation(self): """ Tests the where clause creation """ - query1 = TestModel.query(TestModel.test_id == 5) + query1 = TestModel.objects(TestModel.test_id == 5) ids = [o.query_value.identifier for o in 
query1._where] where = query1._where_clause() assert where == '"test_id" = :{}'.format(*ids) - query2 = query1.query(TestModel.expected_result >= 1) + query2 = query1.filter(TestModel.expected_result >= 1) ids = [o.query_value.identifier for o in query2._where] where = query2._where_clause() assert where == '"test_id" = :{} AND "expected_result" >= :{}'.format(*ids) @@ -137,10 +137,10 @@ def test_querymethod_queryset_is_immutable(self): """ Tests that calling a queryset function that changes it's state returns a new queryset """ - query1 = TestModel.query(TestModel.test_id == 5) + query1 = TestModel.objects(TestModel.test_id == 5) assert len(query1._where) == 1 - query2 = query1.query(TestModel.expected_result >= 1) + query2 = query1.filter(TestModel.expected_result >= 1) assert len(query2._where) == 2 assert len(query1._where) == 1 @@ -228,7 +228,7 @@ def test_query_method_count(self): """ Tests that adding query statements affects the count query as expected """ assert TestModel.objects.count() == 12 - q = TestModel.query(TestModel.test_id == 0) + q = TestModel.objects(TestModel.test_id == 0) assert q.count() == 4 def test_iteration(self): @@ -254,7 +254,7 @@ def test_iteration(self): assert len(compare_set) == 0 # test with query method - q = TestModel.query(TestModel.attempt_id == 3).allow_filtering() + q = TestModel.objects(TestModel.attempt_id == 3).allow_filtering() assert len(q) == 3 #tuple of expected test_id, expected_result values compare_set = set([(0,20), (1,20), (2,75)]) @@ -267,7 +267,7 @@ def test_iteration(self): def test_multiple_iterations_work_properly(self): """ Tests that iterating over a query set more than once works """ # test with both the filtering method and the query method - for q in (TestModel.objects(test_id=0), TestModel.query(TestModel.test_id==0)): + for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id==0)): #tuple of expected attempt_id, expected_result values compare_set = set([(0,5), (1,10), (2,15), 
(3,20)]) for t in q: @@ -288,7 +288,7 @@ def test_multiple_iterators_are_isolated(self): """ tests that the use of one iterator does not affect the behavior of another """ - for q in (TestModel.objects(test_id=0), TestModel.query(TestModel.test_id==0)): + for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id==0)): q = q.order_by('attempt_id') expected_order = [0,1,2,3] iter1 = iter(q) From 264fcaeb7b277573fb6e057cb40c3bf1ba8a9416 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 12:37:36 -0700 Subject: [PATCH 0233/3726] updating the get methods to work with query expressions --- cqlengine/query.py | 4 ++- cqlengine/tests/query/test_queryset.py | 40 ++++++++++++++++---------- 2 files changed, 28 insertions(+), 16 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 47eabf9ade..a56a8a33cc 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -570,7 +570,9 @@ def get(self, *args, **kwargs): A DoesNotExistError will be raised if there are no rows matching the query A MultipleObjectsFoundError will be raised if there is more than one row matching the queyr """ - if kwargs: return self.filter(*args, **kwargs).get() + if args or kwargs: + return self.filter(*args, **kwargs).get() + self._execute_query() if len(self._result_cache) == 0: raise self.model.DoesNotExist diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 0b68617869..921da1b559 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -103,7 +103,7 @@ def test_filter_method_where_clause_generation(self): where = query2._where_clause() assert where == '"test_id" = :{} AND "expected_result" >= :{}'.format(*ids) - def test_query_method_where_clause_generation(self): + def test_query_expression_where_clause_generation(self): """ Tests the where clause creation """ @@ -133,17 +133,6 @@ def test_queryset_is_immutable(self): assert len(query2._where) == 2 assert 
len(query1._where) == 1 - def test_querymethod_queryset_is_immutable(self): - """ - Tests that calling a queryset function that changes it's state returns a new queryset - """ - query1 = TestModel.objects(TestModel.test_id == 5) - assert len(query1._where) == 1 - - query2 = query1.filter(TestModel.expected_result >= 1) - assert len(query2._where) == 2 - assert len(query1._where) == 1 - def test_the_all_method_duplicates_queryset(self): """ Tests that calling all on a queryset with previously defined filters duplicates queryset @@ -224,7 +213,7 @@ def test_count(self): q = TestModel.objects(test_id=0) assert q.count() == 4 - def test_query_method_count(self): + def test_query_expression_count(self): """ Tests that adding query statements affects the count query as expected """ assert TestModel.objects.count() == 12 @@ -267,7 +256,7 @@ def test_iteration(self): def test_multiple_iterations_work_properly(self): """ Tests that iterating over a query set more than once works """ # test with both the filtering method and the query method - for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id==0)): + for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id == 0)): #tuple of expected attempt_id, expected_result values compare_set = set([(0,5), (1,10), (2,15), (3,20)]) for t in q: @@ -288,7 +277,7 @@ def test_multiple_iterators_are_isolated(self): """ tests that the use of one iterator does not affect the behavior of another """ - for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id==0)): + for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id == 0)): q = q.order_by('attempt_id') expected_order = [0,1,2,3] iter1 = iter(q) @@ -318,6 +307,27 @@ def test_get_success_case(self): assert m.test_id == 0 assert m.attempt_id == 0 + def test_query_expression_get_success_case(self): + """ + Tests that the .get() method works on new and existing querysets + """ + m = TestModel.get(TestModel.test_id 
== 0, TestModel.attempt_id == 0) + assert isinstance(m, TestModel) + assert m.test_id == 0 + assert m.attempt_id == 0 + + q = TestModel.objects(TestModel.test_id == 0, TestModel.attempt_id == 0) + m = q.get() + assert isinstance(m, TestModel) + assert m.test_id == 0 + assert m.attempt_id == 0 + + q = TestModel.objects(TestModel.test_id == 0) + m = q.get(TestModel.attempt_id == 0) + assert isinstance(m, TestModel) + assert m.test_id == 0 + assert m.attempt_id == 0 + def test_get_doesnotexist_exception(self): """ Tests that get calls that don't return a result raises a DoesNotExist error From f8c4317098ebb710570f79207044067c2fd1afe6 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 12:45:36 -0700 Subject: [PATCH 0234/3726] adding additional tests around the query expression queries --- cqlengine/tests/query/test_queryset.py | 40 ++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 5 deletions(-) diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 921da1b559..943efb2093 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -361,10 +361,14 @@ def test_order_by_success_case(self): assert model.attempt_id == expect def test_ordering_by_non_second_primary_keys_fail(self): - + # kwarg filtering with self.assertRaises(query.QueryException): q = TestModel.objects(test_id=0).order_by('test_id') + # kwarg filtering + with self.assertRaises(query.QueryException): + q = TestModel.objects(TestModel.test_id == 0).order_by('test_id') + def test_ordering_by_non_primary_keys_fails(self): with self.assertRaises(query.QueryException): q = TestModel.objects(test_id=0).order_by('description') @@ -431,7 +435,7 @@ def test_primary_key_or_index_must_be_specified(self): """ with self.assertRaises(query.QueryException): q = TestModel.objects(test_result=25) - [i for i in q] + list([i for i in q]) def test_primary_key_or_index_must_have_equal_relation_filter(self): """ @@ 
-439,7 +443,7 @@ def test_primary_key_or_index_must_have_equal_relation_filter(self): """ with self.assertRaises(query.QueryException): q = TestModel.objects(test_id__gt=0) - [i for i in q] + list([i for i in q]) def test_indexed_field_can_be_queried(self): @@ -447,7 +451,6 @@ def test_indexed_field_can_be_queried(self): Tests that queries on an indexed field will work without any primary key relations specified """ q = IndexedTestModel.objects(test_result=25) - count = q.count() assert q.count() == 4 class TestQuerySetDelete(BaseQuerySetUsage): @@ -526,6 +529,7 @@ def test_success_case(self): TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='4') time.sleep(0.2) + # test kwarg filtering q = TimeUUIDQueryModel.filter(partition=pk, time__lte=functions.MaxTimeUUID(midpoint)) q = [d for d in q] assert len(q) == 2 @@ -539,13 +543,39 @@ def test_success_case(self): assert '3' in datas assert '4' in datas + # test query expression filtering + q = TimeUUIDQueryModel.filter( + TimeUUIDQueryModel.partition == pk, + TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint) + ) + q = [d for d in q] + assert len(q) == 2 + datas = [d.data for d in q] + assert '1' in datas + assert '2' in datas + + q = TimeUUIDQueryModel.filter( + TimeUUIDQueryModel.partition == pk, + TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint) + ) + assert len(q) == 2 + datas = [d.data for d in q] + assert '3' in datas + assert '4' in datas + class TestInOperator(BaseQuerySetUsage): - def test_success_case(self): + def test_kwarg_success_case(self): + """ Tests the in operator works with the kwarg query method """ q = TestModel.filter(test_id__in=[0,1]) assert q.count() == 8 + def test_query_expression_success_case(self): + """ Tests the in operator works with the query expression query method """ + q = TestModel.filter(TestModel.test_id in [0, 1]) + assert q.count() == 8 + class TestValuesList(BaseQuerySetUsage): def test_values_list(self): From 9f94df4b8eb15a9cd27d411e4105124613be3c88 
Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Sat, 15 Jun 2013 12:50:12 -0700 Subject: [PATCH 0235/3726] #58 - renamed model configuration attributes to use __double_underscore__ style --- changelog | 1 + cqlengine/management.py | 2 +- cqlengine/models.py | 15 ++++++++------- cqlengine/tests/model/test_class_construction.py | 4 ++-- 4 files changed, 12 insertions(+), 10 deletions(-) diff --git a/changelog b/changelog index f16994fa22..423f2783a7 100644 --- a/changelog +++ b/changelog @@ -4,6 +4,7 @@ CHANGELOG * removed default values from all column types * explicit primary key is required (automatic id removed) * added validation on keyname types on .create() +* changed table_name to __table_name__, read_repair_chance to __read_repair_chance__, keyspace to __keyspace__ 0.3.3 * added abstract base class models diff --git a/cqlengine/management.py b/cqlengine/management.py index 90005d2bc2..b3504b919e 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -83,7 +83,7 @@ def add_column(col): qs += ['({})'.format(', '.join(qtypes))] - with_qs = ['read_repair_chance = {}'.format(model.read_repair_chance)] + with_qs = ['read_repair_chance = {}'.format(model.__read_repair_chance__)] _order = ["%s %s" % (c.db_field_name, c.clustering_order or 'ASC') for c in model._clustering_keys.values()] if _order: diff --git a/cqlengine/models.py b/cqlengine/models.py index 7240bf16ca..5e8f6ef308 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -58,11 +58,12 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass #table names will be generated automatically from it's model and package name #however, you can also define them manually here - table_name = None + __table_name__ = None #the keyspace for this model - keyspace = None - read_repair_chance = 0.1 + __keyspace__ = None + + __read_repair_chance__ = 0.1 def __init__(self, **values): self._values = {} @@ -96,7 +97,7 @@ def _can_update(self): @classmethod def _get_keyspace(cls): """ Returns the 
manual keyspace, if set, otherwise the default keyspace """ - return cls.keyspace or DEFAULT_KEYSPACE + return cls.__keyspace__ or DEFAULT_KEYSPACE def __eq__(self, other): return self.as_dict() == other.as_dict() @@ -111,8 +112,8 @@ def column_family_name(cls, include_keyspace=True): otherwise, it creates it from the module and class name """ cf_name = '' - if cls.table_name: - cf_name = cls.table_name.lower() + if cls.__table_name__: + cf_name = cls.__table_name__.lower() else: camelcase = re.compile(r'([a-z])([A-Z])') ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s) @@ -279,7 +280,7 @@ def _transform_column(col_name, col_obj): db_map[col.db_field_name] = field_name #short circuit table_name inheritance - attrs['table_name'] = attrs.get('table_name') + attrs['table_name'] = attrs.get('__table_name__') #add management members to the class attrs['_columns'] = column_dict diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index 3e249be2e4..a3dda94f7b 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -203,8 +203,8 @@ class Model2(Model1): class TestManualTableNaming(BaseCassEngTestCase): class RenamedTest(cqlengine.Model): - keyspace = 'whatever' - table_name = 'manual_name' + __keyspace__ = 'whatever' + __table_name__ = 'manual_name' id = cqlengine.UUID(primary_key=True) data = cqlengine.Text() From 09883d6b0408424e32b4190da41271e6673d8f85 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 16:46:34 -0700 Subject: [PATCH 0236/3726] changed __contains__ to in_() --- cqlengine/query.py | 14 ++++++++------ cqlengine/tests/query/test_queryset.py | 2 +- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index a56a8a33cc..8d7a6392de 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -163,12 +163,17 @@ class 
AbstractColumnDescriptor(object): def _get_column(self): raise NotImplementedError - def __eq__(self, other): - return EqualsOperator(self._get_column(), other) + def in_(self, item): + """ + Returns an in operator - def __contains__(self, item): + used in where you'd typically want to use python's `in` operator + """ return InOperator(self._get_column(), item) + def __eq__(self, other): + return EqualsOperator(self._get_column(), other) + def __gt__(self, other): return GreaterThanOperator(self._get_column(), other) @@ -195,7 +200,6 @@ def cql(self): def to_database(self, val): return val -C = NamedColumnDescriptor class TableDescriptor(object): """ describes a cql table """ @@ -204,7 +208,6 @@ def __init__(self, keyspace, name): self.keyspace = keyspace self.name = name -T = TableDescriptor class KeyspaceDescriptor(object): """ Describes a cql keyspace """ @@ -219,7 +222,6 @@ def table(self, name): """ return TableDescriptor(self.name, name) -K = KeyspaceDescriptor class BatchType(object): Unlogged = 'UNLOGGED' diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 943efb2093..117c87c18b 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -573,7 +573,7 @@ def test_kwarg_success_case(self): def test_query_expression_success_case(self): """ Tests the in operator works with the query expression query method """ - q = TestModel.filter(TestModel.test_id in [0, 1]) + q = TestModel.filter(TestModel.test_id.in_([0, 1])) assert q.count() == 8 From d50fdfab8b4fbe6fbdb732d69fe54e91af4a0ecc Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 16:55:22 -0700 Subject: [PATCH 0237/3726] renaming queryable column --- cqlengine/models.py | 12 +++++++++--- cqlengine/query.py | 4 ++-- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index afc2678ddd..55b03dc20a 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py 
@@ -1,10 +1,9 @@ from collections import OrderedDict import re -import cqlengine from cqlengine import columns from cqlengine.exceptions import ModelException, CQLEngineException, ValidationError -from cqlengine.query import QuerySet, DMLQuery, AbstractColumnDescriptor +from cqlengine.query import QuerySet, DMLQuery, AbstractQueryableColumn from cqlengine.query import DoesNotExist as _DoesNotExist from cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned @@ -56,7 +55,14 @@ def __call__(self, *args, **kwargs): raise NotImplementedError -class ColumnQueryEvaluator(AbstractColumnDescriptor): +class ColumnQueryEvaluator(AbstractQueryableColumn): + """ + Wraps a column and allows it to be used in comparator + expressions, returning query operators + + ie: + Model.column == 5 + """ def __init__(self, column): self.column = column diff --git a/cqlengine/query.py b/cqlengine/query.py index 8d7a6392de..af3a01ddfe 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -154,7 +154,7 @@ class LessThanOrEqualOperator(QueryOperator): symbol = "LTE" cql_symbol = '<=' -class AbstractColumnDescriptor(object): +class AbstractQueryableColumn(object): """ exposes cql query operators through pythons builtin comparator symbols @@ -187,7 +187,7 @@ def __le__(self, other): return LessThanOrEqualOperator(self._get_column(), other) -class NamedColumnDescriptor(AbstractColumnDescriptor): +class NamedColumnDescriptor(AbstractQueryableColumn): """ describes a named cql column """ def __init__(self, name): From 005129e6e1fd581962ccb1b9a7892613ed1e7eb7 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 17:37:04 -0700 Subject: [PATCH 0238/3726] moving named classes into their own file, and renaming them --- cqlengine/named.py | 41 +++++++++++++++++++++++++++++++++++++++++ cqlengine/query.py | 36 ------------------------------------ 2 files changed, 41 insertions(+), 36 deletions(-) create mode 100644 cqlengine/named.py diff --git a/cqlengine/named.py 
b/cqlengine/named.py new file mode 100644 index 0000000000..f56032486b --- /dev/null +++ b/cqlengine/named.py @@ -0,0 +1,41 @@ +from cqlengine.query import AbstractQueryableColumn + + +class NamedColumn(AbstractQueryableColumn): + """ describes a named cql column """ + + def __init__(self, name): + self.name = name + + @property + def cql(self): + return self.name + + def to_database(self, val): + return val + + +class NamedTable(object): + """ describes a cql table """ + + def __init__(self, keyspace, name): + self.keyspace = keyspace + self.name = name + + def column(self, name): + return NamedColumn(name) + + +class NamedKeyspace(object): + """ Describes a cql keyspace """ + + def __init__(self, name): + self.name = name + + def table(self, name): + """ + returns a table descriptor with the given + name that belongs to this keyspace + """ + return NamedTable(self.name, name) + diff --git a/cqlengine/query.py b/cqlengine/query.py index af3a01ddfe..a626a817ed 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -187,42 +187,6 @@ def __le__(self, other): return LessThanOrEqualOperator(self._get_column(), other) -class NamedColumnDescriptor(AbstractQueryableColumn): - """ describes a named cql column """ - - def __init__(self, name): - self.name = name - - @property - def cql(self): - return self.name - - def to_database(self, val): - return val - - -class TableDescriptor(object): - """ describes a cql table """ - - def __init__(self, keyspace, name): - self.keyspace = keyspace - self.name = name - - -class KeyspaceDescriptor(object): - """ Describes a cql keyspace """ - - def __init__(self, name): - self.name = name - - def table(self, name): - """ - returns a table descriptor with the given - name that belongs to this keyspace - """ - return TableDescriptor(self.name, name) - - class BatchType(object): Unlogged = 'UNLOGGED' Counter = 'COUNTER' From 13075bc8a56fbbed36e416c3fda850a373be3357 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 
17:40:18 -0700 Subject: [PATCH 0239/3726] adding tests around name object query creation --- cqlengine/tests/query/test_named.py | 47 +++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 cqlengine/tests/query/test_named.py diff --git a/cqlengine/tests/query/test_named.py b/cqlengine/tests/query/test_named.py new file mode 100644 index 0000000000..a7379b0aaa --- /dev/null +++ b/cqlengine/tests/query/test_named.py @@ -0,0 +1,47 @@ +from cqlengine import query +from cqlengine.named import NamedKeyspace +from cqlengine.tests.query.test_queryset import TestModel +from cqlengine.tests.base import BaseCassEngTestCase + + +class TestQuerySetOperation(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestQuerySetOperation, cls).setUpClass() + cls.keyspace = NamedKeyspace('cqlengine_test') + cls.table = cls.keyspace.table('test_model') + + def test_query_filter_parsing(self): + """ + Tests the queryset filter method parses it's kwargs properly + """ + query1 = self.table.objects(test_id=5) + assert len(query1._where) == 1 + + op = query1._where[0] + assert isinstance(op, query.EqualsOperator) + assert op.value == 5 + + query2 = query1.filter(expected_result__gte=1) + assert len(query2._where) == 2 + + op = query2._where[1] + assert isinstance(op, query.GreaterThanOrEqualOperator) + assert op.value == 1 + + def test_query_expression_parsing(self): + """ Tests that query experessions are evaluated properly """ + query1 = self.table.filter(self.table.column('test_id') == 5) + assert len(query1._where) == 1 + + op = query1._where[0] + assert isinstance(op, query.EqualsOperator) + assert op.value == 5 + + query2 = query1.filter(self.table.column('expected_result') >= 1) + assert len(query2._where) == 2 + + op = query2._where[1] + assert isinstance(op, query.GreaterThanOrEqualOperator) + assert op.value == 1 From 50506f9a970966ea5544396f08385ffa0a2d0a0f Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 17:42:40 
-0700 Subject: [PATCH 0240/3726] adding methods to mimic regular models --- cqlengine/named.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/cqlengine/named.py b/cqlengine/named.py index f56032486b..f2ab1d04d1 100644 --- a/cqlengine/named.py +++ b/cqlengine/named.py @@ -1,3 +1,4 @@ +from cqlengine.models import QuerySetDescriptor from cqlengine.query import AbstractQueryableColumn @@ -7,6 +8,9 @@ class NamedColumn(AbstractQueryableColumn): def __init__(self, name): self.name = name + def _get_column(self): + return self + @property def cql(self): return self.name @@ -25,6 +29,25 @@ def __init__(self, keyspace, name): def column(self, name): return NamedColumn(name) + __abstract__ = False + objects = QuerySetDescriptor() + + @classmethod + def create(cls, **kwargs): + return cls.objects.create(**kwargs) + + @classmethod + def all(cls): + return cls.objects.all() + + @classmethod + def filter(cls, *args, **kwargs): + return cls.objects.filter(*args, **kwargs) + + @classmethod + def get(cls, *args, **kwargs): + return cls.objects.get(*args, **kwargs) + class NamedKeyspace(object): """ Describes a cql keyspace """ From 58e38bca76169291aded8b73042c952929425683 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 17:51:14 -0700 Subject: [PATCH 0241/3726] adding fake _column --- cqlengine/named.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/cqlengine/named.py b/cqlengine/named.py index f2ab1d04d1..abc19ac997 100644 --- a/cqlengine/named.py +++ b/cqlengine/named.py @@ -1,3 +1,5 @@ +from collections import defaultdict, namedtuple + from cqlengine.models import QuerySetDescriptor from cqlengine.query import AbstractQueryableColumn @@ -22,16 +24,25 @@ def to_database(self, val): class NamedTable(object): """ describes a cql table """ + __abstract__ = False + + class ColumnContainer(dict): + def __missing__(self, name): + column = NamedColumn(name) + self[name] = column + return column + 
_columns = ColumnContainer() + + objects = QuerySetDescriptor() + def __init__(self, keyspace, name): self.keyspace = keyspace self.name = name - def column(self, name): + @classmethod + def column(cls, name): return NamedColumn(name) - __abstract__ = False - objects = QuerySetDescriptor() - @classmethod def create(cls, **kwargs): return cls.objects.create(**kwargs) From df2a769c6aca4410c4efcf3e4082468f8997d0a5 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 15 Jun 2013 17:51:32 -0700 Subject: [PATCH 0242/3726] commenting out named table create, for now --- cqlengine/named.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cqlengine/named.py b/cqlengine/named.py index abc19ac997..0072c5318a 100644 --- a/cqlengine/named.py +++ b/cqlengine/named.py @@ -43,9 +43,9 @@ def __init__(self, keyspace, name): def column(cls, name): return NamedColumn(name) - @classmethod - def create(cls, **kwargs): - return cls.objects.create(**kwargs) + # @classmethod + # def create(cls, **kwargs): + # return cls.objects.create(**kwargs) @classmethod def all(cls): From e6644239bb61981f7728cf37495967ed6a26ce73 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Sat, 15 Jun 2013 22:56:57 -0700 Subject: [PATCH 0243/3726] updated docs --- docs/topics/queryset.rst | 39 ++++++++++++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index 48e26a7cba..c237ed6b59 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -29,9 +29,9 @@ Retrieving objects with filters That can be accomplished with the QuerySet's ``.filter(\*\*)`` method. For example, given the model definition: - + .. 
code-block:: python - + class Automobile(Model): manufacturer = columns.Text(primary_key=True) year = columns.Integer(primary_key=True) @@ -40,11 +40,17 @@ Retrieving objects with filters ...and assuming the Automobile table contains a record of every car model manufactured in the last 20 years or so, we can retrieve only the cars made by a single manufacturer like this: - + .. code-block:: python q = Automobile.objects.filter(manufacturer='Tesla') + You can also use the more convenient syntax: + + .. code-block:: python + + q = Automobile.objects(Automobile.manufacturer == 'Tesla') + We can then further filter our query with another call to **.filter** .. code-block:: python @@ -123,6 +129,7 @@ Filtering Operators q = Automobile.objects.filter(manufacturer='Tesla') q = q.filter(year__in=[2011, 2012]) + :attr:`> (__gt) ` .. code-block:: python @@ -130,6 +137,10 @@ Filtering Operators q = Automobile.objects.filter(manufacturer='Tesla') q = q.filter(year__gt=2010) # year > 2010 + # or the nicer syntax + + q.filter(Automobile.year > 2010) + :attr:`>= (__gte) ` .. code-block:: python @@ -137,6 +148,10 @@ Filtering Operators q = Automobile.objects.filter(manufacturer='Tesla') q = q.filter(year__gte=2010) # year >= 2010 + # or the nicer syntax + + Automobile.objects.filter(Automobile.manufacturer == 'Tesla') + :attr:`< (__lt) ` .. code-block:: python @@ -144,6 +159,10 @@ Filtering Operators q = Automobile.objects.filter(manufacturer='Tesla') q = q.filter(year__lt=2012) # year < 2012 + # or... + + q.filter(Automobile.year < 2012) + :attr:`<= (__lte) ` .. code-block:: python @@ -151,6 +170,8 @@ Filtering Operators q = Automobile.objects.filter(manufacturer='Tesla') q = q.filter(year__lte=2012) # year <= 2012 + q.filter(Automobile.year <= 2012) + TimeUUID Functions ================== @@ -220,7 +241,7 @@ Ordering QuerySets Since Cassandra is essentially a distributed hash table on steroids, the order you get records back in will not be particularly predictable. 
- However, you can set a column to order on with the ``.order_by(column_name)`` method. + However, you can set a column to order on with the ``.order_by(column_name)`` method. *Example* @@ -245,12 +266,12 @@ Values Lists Batch Queries =============== - cqlengine now supports batch queries using the BatchQuery class. Batch queries can be started and stopped manually, or within a context manager. To add queries to the batch object, you just need to precede the create/save/delete call with a call to batch, and pass in the batch object. - + cqlengine now supports batch queries using the BatchQuery class. Batch queries can be started and stopped manually, or within a context manager. To add queries to the batch object, you just need to precede the create/save/delete call with a call to batch, and pass in the batch object. + You can only create, update, and delete rows with a batch query, attempting to read rows out of the database with a batch query will fail. .. code-block:: python - + from cqlengine import BatchQuery #using a context manager @@ -298,7 +319,7 @@ QuerySet method reference .. method:: limit(num) Limits the number of results returned by Cassandra. - + *Note that CQL's default limit is 10,000, so all queries without a limit set explicitly will have an implicit limit of 10,000* .. method:: order_by(field_name) @@ -306,7 +327,7 @@ QuerySet method reference :param field_name: the name of the field to order on. *Note: the field_name must be a clustering key* :type field_name: string - Sets the field to order on. + Sets the field to order on. .. 
method:: allow_filtering() From fedb622fe426080d30aa58906aeae7ea9fad7cdb Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 08:18:53 -0700 Subject: [PATCH 0244/3726] adding tests around where clause generation --- cqlengine/tests/query/test_named.py | 31 +++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/cqlengine/tests/query/test_named.py b/cqlengine/tests/query/test_named.py index a7379b0aaa..561083d4db 100644 --- a/cqlengine/tests/query/test_named.py +++ b/cqlengine/tests/query/test_named.py @@ -45,3 +45,34 @@ def test_query_expression_parsing(self): op = query2._where[1] assert isinstance(op, query.GreaterThanOrEqualOperator) assert op.value == 1 + + def test_filter_method_where_clause_generation(self): + """ + Tests the where clause creation + """ + query1 = self.table.objects(test_id=5) + ids = [o.query_value.identifier for o in query1._where] + where = query1._where_clause() + assert where == '"test_id" = :{}'.format(*ids) + + query2 = query1.filter(expected_result__gte=1) + ids = [o.query_value.identifier for o in query2._where] + where = query2._where_clause() + assert where == '"test_id" = :{} AND "expected_result" >= :{}'.format(*ids) + + def test_query_expression_where_clause_generation(self): + """ + Tests the where clause creation + """ + query1 = self.table.objects(self.table.column('test_id') == 5) + ids = [o.query_value.identifier for o in query1._where] + where = query1._where_clause() + assert where == '"test_id" = :{}'.format(*ids) + + query2 = query1.filter(self.table.column('expected_result') >= 1) + ids = [o.query_value.identifier for o in query2._where] + where = query2._where_clause() + assert where == '"test_id" = :{} AND "expected_result" >= :{}'.format(*ids) + + + From e87d806e5b7ce022ecb64e2dd1d242e6aa194b06 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 08:21:04 -0700 Subject: [PATCH 0245/3726] renaming queryset --- cqlengine/models.py | 8 ++++---- cqlengine/query.py | 
6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 55b03dc20a..c3ae852050 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -3,7 +3,7 @@ from cqlengine import columns from cqlengine.exceptions import ModelException, CQLEngineException, ValidationError -from cqlengine.query import QuerySet, DMLQuery, AbstractQueryableColumn +from cqlengine.query import ModelQuerySet, DMLQuery, AbstractQueryableColumn from cqlengine.query import DoesNotExist as _DoesNotExist from cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned @@ -41,16 +41,16 @@ class QuerySetDescriptor(object): """ def __get__(self, obj, model): - """ :rtype: QuerySet """ + """ :rtype: ModelQuerySet """ if model.__abstract__: raise CQLEngineException('cannot execute queries against abstract models') - return QuerySet(model) + return ModelQuerySet(model) def __call__(self, *args, **kwargs): """ Just a hint to IDEs that it's ok to call this - :rtype: QuerySet + :rtype: ModelQuerySet """ raise NotImplementedError diff --git a/cqlengine/query.py b/cqlengine/query.py index a626a817ed..1a9fa7817a 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -239,10 +239,10 @@ def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is not None: return self.execute() -class QuerySet(object): +class ModelQuerySet(object): def __init__(self, model): - super(QuerySet, self).__init__() + super(ModelQuerySet, self).__init__() self.model = model #Where clause filters @@ -501,7 +501,7 @@ def filter(self, *args, **kwargs): #TODO: show examples - :rtype: QuerySet + :rtype: ModelQuerySet """ #add arguments to the where clause filters clone = copy.deepcopy(self) From 86202721ada6d1e5e8c153a9b8b038e4898363c6 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 08:24:19 -0700 Subject: [PATCH 0246/3726] creating simple query set --- cqlengine/query.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 
deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 1a9fa7817a..cc735f6d88 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -239,10 +239,11 @@ def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is not None: return self.execute() -class ModelQuerySet(object): + +class SimpleQuerySet(object): def __init__(self, model): - super(ModelQuerySet, self).__init__() + super(SimpleQuerySet, self).__init__() self.model = model #Where clause filters @@ -501,7 +502,7 @@ def filter(self, *args, **kwargs): #TODO: show examples - :rtype: ModelQuerySet + :rtype: SimpleQuerySet """ #add arguments to the where clause filters clone = copy.deepcopy(self) @@ -700,6 +701,13 @@ def __eq__(self, q): def __ne__(self, q): return not (self != q) + +class ModelQuerySet(SimpleQuerySet): + """ + + """ + + class DMLQuery(object): """ A query object used for queries performing inserts, updates, or deletes From 8aaae66254c82cadfc3f2042be8ab6171b88b43f Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 08:27:02 -0700 Subject: [PATCH 0247/3726] moving where clause validation off of SimpleQuerySet --- cqlengine/query.py | 45 ++++++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index cc735f6d88..9aef5a48a6 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -314,29 +314,8 @@ def __del__(self): #----query generation / execution---- - def _validate_where_syntax(self): - """ Checks that a filterset will not create invalid cql """ - - #check that there's either a = or IN relationship with a primary key or indexed field - equal_ops = [w for w in self._where if isinstance(w, EqualsOperator)] - token_ops = [w for w in self._where if isinstance(w.value, Token)] - if not any([w.column.primary_key or w.column.index for w in equal_ops]) and not token_ops: - raise QueryException('Where clauses require either a "=" or "IN" comparison with either a primary 
key or indexed field') - - if not self._allow_filtering: - #if the query is not on an indexed field - if not any([w.column.index for w in equal_ops]): - if not any([w.column.partition_key for w in equal_ops]) and not token_ops: - raise QueryException('Filtering on a clustering key without a partition key is not allowed unless allow_filtering() is called on the querset') - if any(not w.column.partition_key for w in token_ops): - raise QueryException('The token() function is only supported on the partition key') - - - #TODO: abuse this to see if we can get cql to raise an exception - def _where_clause(self): """ Returns a where clause based on the given filter args """ - self._validate_where_syntax() return ' AND '.join([f.cql for f in self._where]) def _where_values(self): @@ -706,6 +685,30 @@ class ModelQuerySet(SimpleQuerySet): """ """ + def _validate_where_syntax(self): + """ Checks that a filterset will not create invalid cql """ + + #check that there's either a = or IN relationship with a primary key or indexed field + equal_ops = [w for w in self._where if isinstance(w, EqualsOperator)] + token_ops = [w for w in self._where if isinstance(w.value, Token)] + if not any([w.column.primary_key or w.column.index for w in equal_ops]) and not token_ops: + raise QueryException('Where clauses require either a "=" or "IN" comparison with either a primary key or indexed field') + + if not self._allow_filtering: + #if the query is not on an indexed field + if not any([w.column.index for w in equal_ops]): + if not any([w.column.partition_key for w in equal_ops]) and not token_ops: + raise QueryException('Filtering on a clustering key without a partition key is not allowed unless allow_filtering() is called on the querset') + if any(not w.column.partition_key for w in token_ops): + raise QueryException('The token() function is only supported on the partition key') + + + #TODO: abuse this to see if we can get cql to raise an exception + + def _where_clause(self): + """ 
Returns a where clause based on the given filter args """ + self._validate_where_syntax() + return super(ModelQuerySet, self)._where_clause() class DMLQuery(object): From 83c11cb735dda575617e3875bffda207fab0e963 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 08:32:27 -0700 Subject: [PATCH 0248/3726] encapsulating queryset column retrieval --- cqlengine/models.py | 11 +++++++++++ cqlengine/query.py | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index c3ae852050..30a9261745 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -173,6 +173,17 @@ def _get_keyspace(cls): """ Returns the manual keyspace, if set, otherwise the default keyspace """ return cls.keyspace or DEFAULT_KEYSPACE + @classmethod + def _get_column(cls, name): + """ + Returns the column matching the given name, raising a key error if + it doesn't exist + + :param name: the name of the column to return + :rtype: Column + """ + return cls._columns[name] + def __eq__(self, other): return self.as_dict() == other.as_dict() diff --git a/cqlengine/query.py b/cqlengine/query.py index 9aef5a48a6..a846c8c0c6 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -494,7 +494,7 @@ def filter(self, *args, **kwargs): col_name, col_op = self._parse_filter_arg(arg) #resolve column and operator try: - column = self.model._columns[col_name] + column = self.model._get_column(col_name) except KeyError: if col_name == 'pk__token': column = columns._PartitionKeysToken(self.model) From 9c56f3181a10f79d7c8449dd58b949f67e1d98b0 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 08:35:48 -0700 Subject: [PATCH 0249/3726] switching named columns from model queryset to simple queryset --- cqlengine/named.py | 41 +++++++++++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/cqlengine/named.py b/cqlengine/named.py index 0072c5318a..fc0eb0e01f 100644 --- 
a/cqlengine/named.py +++ b/cqlengine/named.py @@ -1,7 +1,26 @@ -from collections import defaultdict, namedtuple +from cqlengine.exceptions import CQLEngineException +from cqlengine.query import AbstractQueryableColumn, SimpleQuerySet -from cqlengine.models import QuerySetDescriptor -from cqlengine.query import AbstractQueryableColumn + +class QuerySetDescriptor(object): + """ + returns a fresh queryset for the given model + it's declared on everytime it's accessed + """ + + def __get__(self, obj, model): + """ :rtype: ModelQuerySet """ + if model.__abstract__: + raise CQLEngineException('cannot execute queries against abstract models') + return SimpleQuerySet(model) + + def __call__(self, *args, **kwargs): + """ + Just a hint to IDEs that it's ok to call this + + :rtype: ModelQuerySet + """ + raise NotImplementedError class NamedColumn(AbstractQueryableColumn): @@ -26,13 +45,6 @@ class NamedTable(object): __abstract__ = False - class ColumnContainer(dict): - def __missing__(self, name): - column = NamedColumn(name) - self[name] = column - return column - _columns = ColumnContainer() - objects = QuerySetDescriptor() def __init__(self, keyspace, name): @@ -43,6 +55,15 @@ def __init__(self, keyspace, name): def column(cls, name): return NamedColumn(name) + @classmethod + def _get_column(cls, name): + """ + Returns the column matching the given name + + :rtype: Column + """ + return cls.column(name) + # @classmethod # def create(cls, **kwargs): # return cls.objects.create(**kwargs) From 23ec346b82a873f9919e6fb7f4602d1e77072960 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 08:37:03 -0700 Subject: [PATCH 0250/3726] updating the named object doc strings --- cqlengine/named.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/cqlengine/named.py b/cqlengine/named.py index fc0eb0e01f..5e092156bf 100644 --- a/cqlengine/named.py +++ b/cqlengine/named.py @@ -24,7 +24,9 @@ def __call__(self, *args, **kwargs): class 
NamedColumn(AbstractQueryableColumn): - """ describes a named cql column """ + """ + A column that is not coupled to a model class, or type + """ def __init__(self, name): self.name = name @@ -41,7 +43,9 @@ def to_database(self, val): class NamedTable(object): - """ describes a cql table """ + """ + A Table that is not coupled to a model class + """ __abstract__ = False @@ -82,7 +86,9 @@ def get(cls, *args, **kwargs): class NamedKeyspace(object): - """ Describes a cql keyspace """ + """ + A keyspace + """ def __init__(self, name): self.name = name From 6d42ab38ed821bdf317fd42d15f4f4d1a5d20f1d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 08:40:36 -0700 Subject: [PATCH 0251/3726] expanding cql methods on named column --- cqlengine/named.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cqlengine/named.py b/cqlengine/named.py index 5e092156bf..94a3c05e90 100644 --- a/cqlengine/named.py +++ b/cqlengine/named.py @@ -36,7 +36,10 @@ def _get_column(self): @property def cql(self): - return self.name + return self.get_cql() + + def get_cql(self): + return '"{}"'.format(self.name) def to_database(self, val): return val From e9d951b671d6b7035bd68bec4f1e999cde943ba9 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 09:14:37 -0700 Subject: [PATCH 0252/3726] making named table operations instance specific --- cqlengine/named.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/cqlengine/named.py b/cqlengine/named.py index 94a3c05e90..9b62efcb39 100644 --- a/cqlengine/named.py +++ b/cqlengine/named.py @@ -12,7 +12,7 @@ def __get__(self, obj, model): """ :rtype: ModelQuerySet """ if model.__abstract__: raise CQLEngineException('cannot execute queries against abstract models') - return SimpleQuerySet(model) + return SimpleQuerySet(obj) def __call__(self, *args, **kwargs): """ @@ -58,11 +58,19 @@ def __init__(self, keyspace, name): self.keyspace = keyspace self.name = name - 
@classmethod def column(cls, name): return NamedColumn(name) - @classmethod + def column_family_name(self, include_keyspace=True): + """ + Returns the column family name if it's been defined + otherwise, it creates it from the module and class name + """ + if include_keyspace: + return '{}.{}'.format(self.keyspace, self.name) + else: + return self.name + def _get_column(cls, name): """ Returns the column matching the given name @@ -75,15 +83,12 @@ def _get_column(cls, name): # def create(cls, **kwargs): # return cls.objects.create(**kwargs) - @classmethod def all(cls): return cls.objects.all() - @classmethod def filter(cls, *args, **kwargs): return cls.objects.filter(*args, **kwargs) - @classmethod def get(cls, *args, **kwargs): return cls.objects.get(*args, **kwargs) From cc3b08bdd6e6a7d45bd8b4f895212dd2f1284345 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 09:17:13 -0700 Subject: [PATCH 0253/3726] renaming cls to self --- cqlengine/named.py | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/cqlengine/named.py b/cqlengine/named.py index 9b62efcb39..f2301bbb03 100644 --- a/cqlengine/named.py +++ b/cqlengine/named.py @@ -58,7 +58,7 @@ def __init__(self, keyspace, name): self.keyspace = keyspace self.name = name - def column(cls, name): + def column(self, name): return NamedColumn(name) def column_family_name(self, include_keyspace=True): @@ -71,26 +71,25 @@ def column_family_name(self, include_keyspace=True): else: return self.name - def _get_column(cls, name): + def _get_column(self, name): """ Returns the column matching the given name :rtype: Column """ - return cls.column(name) + return self.column(name) - # @classmethod - # def create(cls, **kwargs): - # return cls.objects.create(**kwargs) + # def create(self, **kwargs): + # return self.objects.create(**kwargs) - def all(cls): - return cls.objects.all() + def all(self): + return self.objects.all() - def filter(cls, *args, **kwargs): - return 
cls.objects.filter(*args, **kwargs) + def filter(self, *args, **kwargs): + return self.objects.filter(*args, **kwargs) - def get(cls, *args, **kwargs): - return cls.objects.get(*args, **kwargs) + def get(self, *args, **kwargs): + return self.objects.get(*args, **kwargs) class NamedKeyspace(object): From def580706533dd3814d4d108a12cbc3b95b53b22 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 09:22:59 -0700 Subject: [PATCH 0254/3726] =?UTF-8?q?adding=20tests=20around=20named=20tab?= =?UTF-8?q?le=20queries=E2=80=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- cqlengine/tests/query/test_named.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/query/test_named.py b/cqlengine/tests/query/test_named.py index 561083d4db..6f1327f094 100644 --- a/cqlengine/tests/query/test_named.py +++ b/cqlengine/tests/query/test_named.py @@ -1,6 +1,6 @@ from cqlengine import query from cqlengine.named import NamedKeyspace -from cqlengine.tests.query.test_queryset import TestModel +from cqlengine.tests.query.test_queryset import TestModel, BaseQuerySetUsage from cqlengine.tests.base import BaseCassEngTestCase @@ -75,4 +75,28 @@ def test_query_expression_where_clause_generation(self): assert where == '"test_id" = :{} AND "expected_result" >= :{}'.format(*ids) +class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage): + + @classmethod + def setUpClass(cls): + super(TestQuerySetCountSelectionAndIteration, cls).setUpClass() + ks,tn = TestModel.column_family_name().split('.') + cls.keyspace = NamedKeyspace(ks) + cls.table = cls.keyspace.table(tn) + + + def test_count(self): + """ Tests that adding filtering statements affects the count query as expected """ + assert self.table.objects.count() == 12 + + q = self.table.objects(test_id=0) + assert q.count() == 4 + + def test_query_expression_count(self): + """ Tests that adding query statements affects the 
count query as expected """ + assert self.table.objects.count() == 12 + + q = self.table.objects(self.table.column('test_id') == 0) + assert q.count() == 4 + From cc40938b592a8d95922e3e321adb07a3a9f92723 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 09:27:41 -0700 Subject: [PATCH 0255/3726] splitting the simple queryset into an abstract and simple queryset --- cqlengine/query.py | 37 +++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index a846c8c0c6..b54fcbf30a 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -240,10 +240,10 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.execute() -class SimpleQuerySet(object): +class AbstractQuerySet(object): def __init__(self, model): - super(SimpleQuerySet, self).__init__() + super(AbstractQuerySet, self).__init__() self.model = model #Where clause filters @@ -325,16 +325,15 @@ def _where_values(self): values.update(where.get_dict()) return values + def _get_select_fields(self): + """ Returns the fields to be returned by the select query """ + raise NotImplementedError + def _select_query(self): """ Returns a select clause based on the given filter args """ - fields = self.model._columns.keys() - if self._defer_fields: - fields = [f for f in fields if f not in self._defer_fields] - elif self._only_fields: - fields = self._only_fields - db_fields = [self.model._columns[f].db_field_name for f in fields] + db_fields = self._get_select_fields() qs = ['SELECT {}'.format(', '.join(['"{}"'.format(f) for f in db_fields]))] qs += ['FROM {}'.format(self.column_family_name)] @@ -481,7 +480,7 @@ def filter(self, *args, **kwargs): #TODO: show examples - :rtype: SimpleQuerySet + :rtype: AbstractQuerySet """ #add arguments to the where clause filters clone = copy.deepcopy(self) @@ -680,8 +679,17 @@ def __eq__(self, q): def __ne__(self, q): return not (self != q) +class SimpleQuerySet(AbstractQuerySet): + """ + + 
""" + + def _get_select_fields(self): + """ Returns the fields to be returned by the select query """ + return ['*'] + -class ModelQuerySet(SimpleQuerySet): +class ModelQuerySet(AbstractQuerySet): """ """ @@ -710,6 +718,15 @@ def _where_clause(self): self._validate_where_syntax() return super(ModelQuerySet, self)._where_clause() + def _get_select_fields(self): + """ Returns the fields to be returned by the select query """ + fields = self.model._columns.keys() + if self._defer_fields: + fields = [f for f in fields if f not in self._defer_fields] + elif self._only_fields: + fields = self._only_fields + return [self.model._columns[f].db_field_name for f in fields] + class DMLQuery(object): """ From df964ec3a5543d670f775b96159e520d5505e572 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 09:37:42 -0700 Subject: [PATCH 0256/3726] moving the select statement into it's own method --- cqlengine/query.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index b54fcbf30a..5a87d94965 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -325,17 +325,15 @@ def _where_values(self): values.update(where.get_dict()) return values - def _get_select_fields(self): - """ Returns the fields to be returned by the select query """ + def _get_select_statement(self): + """ returns the select portion of this queryset's cql statement """ raise NotImplementedError def _select_query(self): """ Returns a select clause based on the given filter args """ - db_fields = self._get_select_fields() - - qs = ['SELECT {}'.format(', '.join(['"{}"'.format(f) for f in db_fields]))] + qs = [self._get_select_statement()] qs += ['FROM {}'.format(self.column_family_name)] if self._where: @@ -684,9 +682,9 @@ class SimpleQuerySet(AbstractQuerySet): """ - def _get_select_fields(self): + def _get_select_statement(self): """ Returns the fields to be returned by the select query """ - return ['*'] + return 'SELECT 
*' class ModelQuerySet(AbstractQuerySet): @@ -718,14 +716,15 @@ def _where_clause(self): self._validate_where_syntax() return super(ModelQuerySet, self)._where_clause() - def _get_select_fields(self): + def _get_select_statement(self): """ Returns the fields to be returned by the select query """ fields = self.model._columns.keys() if self._defer_fields: fields = [f for f in fields if f not in self._defer_fields] elif self._only_fields: fields = self._only_fields - return [self.model._columns[f].db_field_name for f in fields] + db_fields = [self.model._columns[f].db_field_name for f in fields] + return 'SELECT {}'.format(', '.join(['"{}"'.format(f) for f in db_fields])) class DMLQuery(object): From fbe3294554a1c1e2b498de4298829fd6d6d33cfa Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 09:48:07 -0700 Subject: [PATCH 0257/3726] creating different result constructor factory methods for each queryset type --- cqlengine/query.py | 71 +++++++++++++++++++++++++++------------------- 1 file changed, 42 insertions(+), 29 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 5a87d94965..06d2f2bfe4 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -420,22 +420,7 @@ def _create_result_constructor(self, names): """ Returns a function that will be used to instantiate query results """ - model = self.model - db_map = model._db_map - if not self._values_list: - def _construct_instance(values): - field_dict = dict((db_map.get(k, k), v) for k, v in zip(names, values)) - instance = model(**field_dict) - instance._is_persisted = True - return instance - return _construct_instance - - columns = [model._columns[db_map[name]] for name in names] - if self._flat_values_list: - return (lambda values: columns[0].to_python(values[0])) - else: - # result_cls = namedtuple("{}Tuple".format(self.model.__name__), names) - return (lambda values: map(lambda (c, v): c.to_python(v), zip(columns, values))) + raise NotImplementedError def batch(self, 
batch_obj): """ @@ -658,19 +643,6 @@ def delete(self, columns=[]): else: execute(qs, self._where_values()) - def values_list(self, *fields, **kwargs): - """ Instructs the query set to return tuples, not model instance """ - flat = kwargs.pop('flat', False) - if kwargs: - raise TypeError('Unexpected keyword arguments to values_list: %s' - % (kwargs.keys(),)) - if flat and len(fields) > 1: - raise TypeError("'flat' is not valid when values_list is called with more than one field.") - clone = self.only(fields) - clone._values_list = True - clone._flat_values_list = flat - return clone - def __eq__(self, q): return set(self._where) == set(q._where) @@ -686,6 +658,13 @@ def _get_select_statement(self): """ Returns the fields to be returned by the select query """ return 'SELECT *' + def _create_result_constructor(self, names): + """ + Returns a function that will be used to instantiate query results + """ + def _construct_instance(values): + return dict(zip(names, values)) + return _construct_instance class ModelQuerySet(AbstractQuerySet): """ @@ -726,6 +705,40 @@ def _get_select_statement(self): db_fields = [self.model._columns[f].db_field_name for f in fields] return 'SELECT {}'.format(', '.join(['"{}"'.format(f) for f in db_fields])) + def _create_result_constructor(self, names): + """ + Returns a function that will be used to instantiate query results + """ + model = self.model + db_map = model._db_map + if not self._values_list: + def _construct_instance(values): + field_dict = dict((db_map.get(k, k), v) for k, v in zip(names, values)) + instance = model(**field_dict) + instance._is_persisted = True + return instance + return _construct_instance + + columns = [model._columns[db_map[name]] for name in names] + if self._flat_values_list: + return (lambda values: columns[0].to_python(values[0])) + else: + # result_cls = namedtuple("{}Tuple".format(self.model.__name__), names) + return (lambda values: map(lambda (c, v): c.to_python(v), zip(columns, values))) + + def 
values_list(self, *fields, **kwargs): + """ Instructs the query set to return tuples, not model instance """ + flat = kwargs.pop('flat', False) + if kwargs: + raise TypeError('Unexpected keyword arguments to values_list: %s' + % (kwargs.keys(),)) + if flat and len(fields) > 1: + raise TypeError("'flat' is not valid when values_list is called with more than one field.") + clone = self.only(fields) + clone._values_list = True + clone._flat_values_list = flat + return clone + class DMLQuery(object): """ From d741790ec78b90dd66a19c328670aec84b60c01c Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 09:52:37 -0700 Subject: [PATCH 0258/3726] changing named query results to Result Object instance --- cqlengine/query.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 06d2f2bfe4..d3b9d01023 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -649,6 +649,17 @@ def __eq__(self, q): def __ne__(self, q): return not (self != q) +class ResultObject(dict): + """ + adds attribute access to a dictionary + """ + + def __getattr__(self, item): + try: + return self[item] + except KeyError: + raise AttributeError + class SimpleQuerySet(AbstractQuerySet): """ @@ -663,7 +674,7 @@ def _create_result_constructor(self, names): Returns a function that will be used to instantiate query results """ def _construct_instance(values): - return dict(zip(names, values)) + return ResultObject(zip(names, values)) return _construct_instance class ModelQuerySet(AbstractQuerySet): From 5c1e203f099470d8d66ec44be8446b04d8461169 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 09:58:56 -0700 Subject: [PATCH 0259/3726] adding does not exist and multiple objects returns exceptions to named table --- cqlengine/named.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cqlengine/named.py b/cqlengine/named.py index f2301bbb03..c1d1263a82 100644 --- a/cqlengine/named.py +++ 
b/cqlengine/named.py @@ -1,6 +1,8 @@ from cqlengine.exceptions import CQLEngineException from cqlengine.query import AbstractQueryableColumn, SimpleQuerySet +from cqlengine.query import DoesNotExist as _DoesNotExist +from cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned class QuerySetDescriptor(object): """ @@ -54,6 +56,9 @@ class NamedTable(object): objects = QuerySetDescriptor() + class DoesNotExist(_DoesNotExist): pass + class MultipleObjectsReturned(_MultipleObjectsReturned): pass + def __init__(self, keyspace, name): self.keyspace = keyspace self.name = name From 6ccc32084c208d612eca7f577962b79f5cf7714e Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 10:07:57 -0700 Subject: [PATCH 0260/3726] moving order conditions into their own method and adding extra validation to the model queryset order condition method --- cqlengine/query.py | 44 ++++++++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index d3b9d01023..b01e1a9ea3 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -510,6 +510,12 @@ def get(self, *args, **kwargs): else: return self[0] + def _get_ordering_condition(self, colname): + order_type = 'DESC' if colname.startswith('-') else 'ASC' + colname = colname.replace('-', '') + + return colname, order_type + def order_by(self, *colnames): """ orders the result set. 
@@ -524,24 +530,7 @@ def order_by(self, *colnames): conditions = [] for colname in colnames: - order_type = 'DESC' if colname.startswith('-') else 'ASC' - colname = colname.replace('-', '') - - column = self.model._columns.get(colname) - if column is None: - raise QueryException("Can't resolve the column name: '{}'".format(colname)) - - #validate the column selection - if not column.primary_key: - raise QueryException( - "Can't order on '{}', can only order on (clustered) primary keys".format(colname)) - - pks = [v for k, v in self.model._columns.items() if v.primary_key] - if column == pks[0]: - raise QueryException( - "Can't order by the first primary key (partition key), clustering (secondary) keys only") - - conditions.append('"{}" {}'.format(column.db_field_name, order_type)) + conditions.append('"{}" {}'.format(*self._get_ordering_condition(colname))) clone = copy.deepcopy(self) clone._order.extend(conditions) @@ -737,6 +726,25 @@ def _construct_instance(values): # result_cls = namedtuple("{}Tuple".format(self.model.__name__), names) return (lambda values: map(lambda (c, v): c.to_python(v), zip(columns, values))) + def _get_ordering_condition(self, colname): + colname, order_type = super(ModelQuerySet, self)._get_ordering_condition(colname) + + column = self.model._columns.get(colname) + if column is None: + raise QueryException("Can't resolve the column name: '{}'".format(colname)) + + #validate the column selection + if not column.primary_key: + raise QueryException( + "Can't order on '{}', can only order on (clustered) primary keys".format(colname)) + + pks = [v for k, v in self.model._columns.items() if v.primary_key] + if column == pks[0]: + raise QueryException( + "Can't order by the first primary key (partition key), clustering (secondary) keys only") + + return column.db_field_name, order_type + def values_list(self, *fields, **kwargs): """ Instructs the query set to return tuples, not model instance """ flat = kwargs.pop('flat', False) From 
d7a7d13f54661b9af781219ce2a1e6de65378ab8 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 10:08:15 -0700 Subject: [PATCH 0261/3726] adding additional tests around named table queries --- cqlengine/tests/query/test_named.py | 128 +++++++++++++++++++++++++++- 1 file changed, 127 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/query/test_named.py b/cqlengine/tests/query/test_named.py index 6f1327f094..cba46dcb59 100644 --- a/cqlengine/tests/query/test_named.py +++ b/cqlengine/tests/query/test_named.py @@ -1,6 +1,7 @@ from cqlengine import query from cqlengine.named import NamedKeyspace -from cqlengine.tests.query.test_queryset import TestModel, BaseQuerySetUsage +from cqlengine.query import ResultObject +from cqlengine.tests.query.test_queryset import BaseQuerySetUsage from cqlengine.tests.base import BaseCassEngTestCase @@ -80,6 +81,9 @@ class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage): @classmethod def setUpClass(cls): super(TestQuerySetCountSelectionAndIteration, cls).setUpClass() + + from cqlengine.tests.query.test_queryset import TestModel + ks,tn = TestModel.column_family_name().split('.') cls.keyspace = NamedKeyspace(ks) cls.table = cls.keyspace.table(tn) @@ -99,4 +103,126 @@ def test_query_expression_count(self): q = self.table.objects(self.table.column('test_id') == 0) assert q.count() == 4 + def test_iteration(self): + """ Tests that iterating over a query set pulls back all of the expected results """ + q = self.table.objects(test_id=0) + #tuple of expected attempt_id, expected_result values + compare_set = set([(0,5), (1,10), (2,15), (3,20)]) + for t in q: + val = t.attempt_id, t.expected_result + assert val in compare_set + compare_set.remove(val) + assert len(compare_set) == 0 + + # test with regular filtering + q = self.table.objects(attempt_id=3).allow_filtering() + assert len(q) == 3 + #tuple of expected test_id, expected_result values + compare_set = set([(0,20), (1,20), (2,75)]) + for t in q: + val = 
t.test_id, t.expected_result + assert val in compare_set + compare_set.remove(val) + assert len(compare_set) == 0 + + # test with query method + q = self.table.objects(self.table.column('attempt_id') == 3).allow_filtering() + assert len(q) == 3 + #tuple of expected test_id, expected_result values + compare_set = set([(0,20), (1,20), (2,75)]) + for t in q: + val = t.test_id, t.expected_result + assert val in compare_set + compare_set.remove(val) + assert len(compare_set) == 0 + + def test_multiple_iterations_work_properly(self): + """ Tests that iterating over a query set more than once works """ + # test with both the filtering method and the query method + for q in (self.table.objects(test_id=0), self.table.objects(self.table.column('test_id') == 0)): + #tuple of expected attempt_id, expected_result values + compare_set = set([(0,5), (1,10), (2,15), (3,20)]) + for t in q: + val = t.attempt_id, t.expected_result + assert val in compare_set + compare_set.remove(val) + assert len(compare_set) == 0 + + #try it again + compare_set = set([(0,5), (1,10), (2,15), (3,20)]) + for t in q: + val = t.attempt_id, t.expected_result + assert val in compare_set + compare_set.remove(val) + assert len(compare_set) == 0 + + def test_multiple_iterators_are_isolated(self): + """ + tests that the use of one iterator does not affect the behavior of another + """ + for q in (self.table.objects(test_id=0), self.table.objects(self.table.column('test_id') == 0)): + q = q.order_by('attempt_id') + expected_order = [0,1,2,3] + iter1 = iter(q) + iter2 = iter(q) + for attempt_id in expected_order: + assert iter1.next().attempt_id == attempt_id + assert iter2.next().attempt_id == attempt_id + + def test_get_success_case(self): + """ + Tests that the .get() method works on new and existing querysets + """ + m = self.table.objects.get(test_id=0, attempt_id=0) + assert isinstance(m, ResultObject) + assert m.test_id == 0 + assert m.attempt_id == 0 + + q = self.table.objects(test_id=0, attempt_id=0) + 
m = q.get() + assert isinstance(m, ResultObject) + assert m.test_id == 0 + assert m.attempt_id == 0 + + q = self.table.objects(test_id=0) + m = q.get(attempt_id=0) + assert isinstance(m, ResultObject) + assert m.test_id == 0 + assert m.attempt_id == 0 + + def test_query_expression_get_success_case(self): + """ + Tests that the .get() method works on new and existing querysets + """ + m = self.table.get(self.table.column('test_id') == 0, self.table.column('attempt_id') == 0) + assert isinstance(m, ResultObject) + assert m.test_id == 0 + assert m.attempt_id == 0 + + q = self.table.objects(self.table.column('test_id') == 0, self.table.column('attempt_id') == 0) + m = q.get() + assert isinstance(m, ResultObject) + assert m.test_id == 0 + assert m.attempt_id == 0 + + q = self.table.objects(self.table.column('test_id') == 0) + m = q.get(self.table.column('attempt_id') == 0) + assert isinstance(m, ResultObject) + assert m.test_id == 0 + assert m.attempt_id == 0 + + def test_get_doesnotexist_exception(self): + """ + Tests that get calls that don't return a result raises a DoesNotExist error + """ + with self.assertRaises(self.table.DoesNotExist): + self.table.objects.get(test_id=100) + + def test_get_multipleobjects_exception(self): + """ + Tests that get calls that return multiple results raise a MultipleObjectsReturned error + """ + with self.assertRaises(self.table.MultipleObjectsReturned): + self.table.objects.get(test_id=1) + From 423e26e6b4d53e48a6b0ad65e945be06d6546dc4 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 16 Jun 2013 10:27:42 -0700 Subject: [PATCH 0262/3726] removing module name from auto column family name generation --- cqlengine/models.py | 4 ---- cqlengine/tests/model/test_class_construction.py | 8 ++++++++ 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 3dca106428..6b9536e960 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -187,10 +187,6 @@ def 
column_family_name(cls, include_keyspace=True): camelcase = re.compile(r'([a-z])([A-Z])') ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s) - module = cls.__module__.split('.') - if module: - cf_name = ccase(module[-1]) + '_' - cf_name += ccase(cls.__name__) #trim to less than 48 characters or cassandra will complain cf_name = cf_name[-48:] diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index 05e77bde9a..f0344c6bd3 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -107,6 +107,14 @@ class InheritedModel(TestModel): assert 'text' in InheritedModel._columns assert 'numbers' in InheritedModel._columns + def test_column_family_name_generation(self): + """ Tests that auto column family name generation works as expected """ + class TestModel(Model): + id = columns.UUID(primary_key=True, default=lambda:uuid4()) + text = columns.Text() + + assert TestModel.column_family_name(include_keyspace=False) == 'test_model' + def test_normal_fields_can_be_defined_between_primary_keys(self): """ Tests tha non primary key fields can be defined between primary key fields From 61de63acc8863c0b2a46df0a18f871fc94fbb4bf Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 17 Jun 2013 11:45:37 -0700 Subject: [PATCH 0263/3726] updating changelog --- changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog b/changelog index bb830d8efb..9084cdfc55 100644 --- a/changelog +++ b/changelog @@ -5,6 +5,7 @@ CHANGELOG * explicit primary key is required (automatic id removed) * added validation on keyname types on .create() * changed table_name to __table_name__, read_repair_chance to __read_repair_chance__, keyspace to __keyspace__ +* modified table name auto generator to ignore module name * changed internal implementation of model value get/set 0.3.3 From d9491aa5f5acc7c806dfdc7809d0a9aefa14e62d Mon Sep 17 
00:00:00 2001 From: Blake Eggleston Date: Mon, 17 Jun 2013 11:48:58 -0700 Subject: [PATCH 0264/3726] adding breaking change link to readme --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b0037b9465..10531060e5 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,9 @@ cqlengine =============== -cqlengine is a Cassandra CQL 3 Object Mapper for Python with an interface similar to the Django orm and mongoengine +cqlengine is a Cassandra CQL 3 Object Mapper for Python + +**Users of versions < 0.4, please read this post: [Breaking Changes](https://groups.google.com/forum/?fromgroups#!topic/cqlengine-users/erkSNe1JwuU)** [Documentation](https://cqlengine.readthedocs.org/en/latest/) From fec88a847e8b52b4a0b46bc5dada002f4c6c4d14 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 17 Jun 2013 16:37:03 -0700 Subject: [PATCH 0265/3726] create uuid1 from datetime #65 --- cqlengine/columns.py | 48 ++++++++++++++++++++++ cqlengine/tests/columns/test_validation.py | 13 +++++- 2 files changed, 59 insertions(+), 2 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 941d2419aa..0838195ddb 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -304,6 +304,9 @@ def to_python(self, value): def to_database(self, value): return self.validate(value) +_last_timestamp = None +from uuid import UUID as pyUUID, getnode + class TimeUUID(UUID): """ UUID containing timestamp @@ -311,6 +314,50 @@ class TimeUUID(UUID): db_type = 'timeuuid' + @classmethod + def from_datetime(self, dt): + """ + generates a UUID for a given datetime + + :param dt: datetime + :type dt: datetime + :return: + """ + global _last_timestamp + + epoch = datetime(1970, 1, 1, tzinfo=dt.tzinfo) + + offset = 0 + if epoch.tzinfo: + offset_delta = epoch.tzinfo.utcoffset(epoch) + offset = offset_delta.days*24*3600 + offset_delta.seconds + + timestamp = (dt - epoch).total_seconds() - offset + + node = None + clock_seq = None + + nanoseconds 
= int(timestamp * 1e9) + timestamp = int(nanoseconds // 100) + 0x01b21dd213814000L + + if _last_timestamp is not None and timestamp <= _last_timestamp: + timestamp = _last_timestamp + 1 + _last_timestamp = timestamp + if clock_seq is None: + import random + clock_seq = random.randrange(1 << 14L) # instead of stable storage + time_low = timestamp & 0xffffffffL + time_mid = (timestamp >> 32L) & 0xffffL + time_hi_version = (timestamp >> 48L) & 0x0fffL + clock_seq_low = clock_seq & 0xffL + clock_seq_hi_variant = (clock_seq >> 8L) & 0x3fL + if node is None: + node = getnode() + return pyUUID(fields=(time_low, time_mid, time_hi_version, + clock_seq_hi_variant, clock_seq_low, node), version=1) + + + class Boolean(Column): db_type = 'boolean' @@ -325,6 +372,7 @@ def to_python(self, value): def to_database(self, value): return self.Quoter(bool(value)) + class Float(Column): db_type = 'double' diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 627ccbd3e6..a68958bb23 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -3,6 +3,7 @@ from datetime import date from datetime import tzinfo from decimal import Decimal as D +from unittest import TestCase from uuid import uuid4, uuid1 from cqlengine import ValidationError @@ -204,10 +205,18 @@ def test_extra_field(self): self.TestModel.create(bacon=5000) +class TestTimeUUIDFromDatetime(TestCase): + def test_conversion_specific_date(self): + dt = datetime(1981, 7, 11, microsecond=555000) + uuid = TimeUUID.from_datetime(dt) + from uuid import UUID + assert isinstance(uuid, UUID) + ts = (uuid.time - 0x01b21dd213814000) / 1e7 # back to a timestamp + new_dt = datetime.utcfromtimestamp(ts) - - + # checks that we created a UUID1 with the proper timestamp + assert new_dt == dt From fed38658460871eaa1ff86b036c577406f20dc7b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 17 Jun 2013 16:39:26 -0700 Subject: [PATCH 0266/3726] 
updated changelog --- changelog | 2 ++ 1 file changed, 2 insertions(+) diff --git a/changelog b/changelog index 9084cdfc55..5f52491b32 100644 --- a/changelog +++ b/changelog @@ -7,6 +7,8 @@ CHANGELOG * changed table_name to __table_name__, read_repair_chance to __read_repair_chance__, keyspace to __keyspace__ * modified table name auto generator to ignore module name * changed internal implementation of model value get/set +* added TimeUUID.from_datetime(), used for generating UUID1's for a specific +time 0.3.3 * added abstract base class models From 071b54a76bafa1f62344a226f41c93fe2ab22142 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 18 Jun 2013 11:50:22 -0700 Subject: [PATCH 0267/3726] updating setup --- setup.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index 512d55e902..fb2fc15630 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ version = '0.3.3' long_desc = """ -cqlengine is a Cassandra CQL Object Mapper for Python in the style of the Django orm and mongoengine +Cassandra CQL 3 Object Mapper for Python [Documentation](https://cqlengine.readthedocs.org/en/latest/) @@ -21,8 +21,7 @@ setup( name='cqlengine', version=version, - description='Cassandra CQL ORM for Python in the style of the Django orm and mongoengine', - dependency_links = ['https://github.com/bdeggleston/cqlengine/archive/{0}.tar.gz#egg=cqlengine-{0}'.format(version)], + description='Cassandra CQL 3 Object Mapper for Python', long_description=long_desc, classifiers = [ "Environment :: Web Environment", @@ -38,7 +37,7 @@ install_requires = ['cql'], author='Blake Eggleston', author_email='bdeggleston@gmail.com', - url='https://github.com/bdeggleston/cqlengine', + url='https://github.com/cqlengine/cqlengine', license='BSD', packages=find_packages(), include_package_data=True, From 474bfb4c098a6240edf01c917a529a3cc3ca0e34 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 18 Jun 2013 11:51:21 -0700 Subject: [PATCH 0268/3726] 
version number bump --- cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- setup.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index d103644c90..3e2d00b097 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -3,5 +3,5 @@ from cqlengine.models import Model from cqlengine.query import BatchQuery -__version__ = '0.3.3' +__version__ = '0.4' diff --git a/docs/conf.py b/docs/conf.py index 5d797fee3e..50f1bd0129 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.3.3' +version = '0.4' # The full version, including alpha/beta/rc tags. -release = '0.3.3' +release = '0.4' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index fb2fc15630..1b6b8b8d5e 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.3.3' +version = '0.4' long_desc = """ Cassandra CQL 3 Object Mapper for Python From b5d0ddad8bcf8eef4da51348a49fe128b3fd49de Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 18 Jun 2013 11:57:35 -0700 Subject: [PATCH 0269/3726] removing alpha warning --- docs/index.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index f93a8505c4..2901f2aff4 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -88,8 +88,6 @@ Getting Started `Dev Mailing List `_ -**NOTE: cqlengine is in alpha and under development, some features may change. 
Make sure to check the changelog and test your app before upgrading** - Indices and tables ================== From cc1af16c1c80d4acb631e4769903c4a2d78b3d98 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 18 Jun 2013 17:50:30 -0700 Subject: [PATCH 0270/3726] fixed TS issue --- cqlengine/columns.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 0838195ddb..0210e8dbb8 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -304,7 +304,6 @@ def to_python(self, value): def to_database(self, value): return self.validate(value) -_last_timestamp = None from uuid import UUID as pyUUID, getnode class TimeUUID(UUID): @@ -340,9 +339,6 @@ def from_datetime(self, dt): nanoseconds = int(timestamp * 1e9) timestamp = int(nanoseconds // 100) + 0x01b21dd213814000L - if _last_timestamp is not None and timestamp <= _last_timestamp: - timestamp = _last_timestamp + 1 - _last_timestamp = timestamp if clock_seq is None: import random clock_seq = random.randrange(1 << 14L) # instead of stable storage From 1d4e9b0957070092e02eb4a659370255a896fb10 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 18 Jun 2013 17:53:53 -0700 Subject: [PATCH 0271/3726] bugfix for timeuuids --- cqlengine/__init__.py | 2 +- docs/conf.py | 2 +- setup.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 3e2d00b097..df941b0fff 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -3,5 +3,5 @@ from cqlengine.models import Model from cqlengine.query import BatchQuery -__version__ = '0.4' +__version__ = '0.4.1' diff --git a/docs/conf.py b/docs/conf.py index 50f1bd0129..5aff68540e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -50,7 +50,7 @@ # The short X.Y version. version = '0.4' # The full version, including alpha/beta/rc tags. -release = '0.4' +release = '0.4.1' # The language for content autogenerated by Sphinx. 
Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index 1b6b8b8d5e..ff3dcddc4b 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.4' +version = '0.4.1' long_desc = """ Cassandra CQL 3 Object Mapper for Python From dad5effe7b79d86f8c39444cc0c2b93ac9ed33ec Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 19 Jun 2013 09:55:54 -0700 Subject: [PATCH 0272/3726] adding change warning to all doc pages --- docs/index.rst | 4 ++++ docs/topics/columns.rst | 4 ++++ docs/topics/connection.rst | 4 ++++ docs/topics/manage_schemas.rst | 6 ++++++ docs/topics/models.rst | 6 ++++++ docs/topics/queryset.rst | 6 ++++++ 6 files changed, 30 insertions(+) diff --git a/docs/index.rst b/docs/index.rst index 2901f2aff4..9dff72fd56 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -5,6 +5,10 @@ cqlengine documentation ======================= +**Users of versions < 0.4, please read this post before upgrading:** `Breaking Changes`_ + +.. _Breaking Changes: https://groups.google.com/forum/?fromgroups#!topic/cqlengine-users/erkSNe1JwuU + cqlengine is a Cassandra CQL 3 Object Mapper for Python with an interface similar to the Django orm and mongoengine :ref:`getting-started` diff --git a/docs/topics/columns.rst b/docs/topics/columns.rst index 61252e7f97..987d7d7c4f 100644 --- a/docs/topics/columns.rst +++ b/docs/topics/columns.rst @@ -2,6 +2,10 @@ Columns ======= +**Users of versions < 0.4, please read this post before upgrading:** `Breaking Changes`_ + +.. _Breaking Changes: https://groups.google.com/forum/?fromgroups#!topic/cqlengine-users/erkSNe1JwuU + .. module:: cqlengine.columns .. 
class:: Bytes() diff --git a/docs/topics/connection.rst b/docs/topics/connection.rst index 48558f9c46..88f601fbd0 100644 --- a/docs/topics/connection.rst +++ b/docs/topics/connection.rst @@ -2,6 +2,10 @@ Connection ============== +**Users of versions < 0.4, please read this post before upgrading:** `Breaking Changes`_ + +.. _Breaking Changes: https://groups.google.com/forum/?fromgroups#!topic/cqlengine-users/erkSNe1JwuU + .. module:: cqlengine.connection The setup function in `cqlengine.connection` records the Cassandra servers to connect to. diff --git a/docs/topics/manage_schemas.rst b/docs/topics/manage_schemas.rst index 2174e56f6a..413b53c481 100644 --- a/docs/topics/manage_schemas.rst +++ b/docs/topics/manage_schemas.rst @@ -2,6 +2,12 @@ Managing Schmas =============== +**Users of versions < 0.4, please read this post before upgrading:** `Breaking Changes`_ + +.. _Breaking Changes: https://groups.google.com/forum/?fromgroups#!topic/cqlengine-users/erkSNe1JwuU + +.. module:: cqlengine.connection + .. module:: cqlengine.management Once a connection has been made to Cassandra, you can use the functions in ``cqlengine.management`` to create and delete keyspaces, as well as create and delete tables for defined models diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 5dc322f61b..f599f2cfb7 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -2,6 +2,12 @@ Models ====== +**Users of versions < 0.4, please read this post before upgrading:** `Breaking Changes`_ + +.. _Breaking Changes: https://groups.google.com/forum/?fromgroups#!topic/cqlengine-users/erkSNe1JwuU + +.. module:: cqlengine.connection + .. module:: cqlengine.models A model is a python class representing a CQL table. 
diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index c237ed6b59..0a481fbe9f 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -2,6 +2,12 @@ Making Queries ============== +**Users of versions < 0.4, please read this post before upgrading:** `Breaking Changes`_ + +.. _Breaking Changes: https://groups.google.com/forum/?fromgroups#!topic/cqlengine-users/erkSNe1JwuU + +.. module:: cqlengine.connection + .. module:: cqlengine.query Retrieving objects From 147158eb99c16530e8cf40d8324ba8059eb234cc Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 19 Jun 2013 14:50:43 -0700 Subject: [PATCH 0273/3726] updating changelog --- changelog | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/changelog b/changelog index 5f52491b32..29a8b3f208 100644 --- a/changelog +++ b/changelog @@ -1,6 +1,12 @@ CHANGELOG -0.4.0 (in progress) +0.4.2 +* added support for instantiating container columns with column instances + +0.4.1 +* fixed bug in TimeUUID from datetime method + +0.4.0 * removed default values from all column types * explicit primary key is required (automatic id removed) * added validation on keyname types on .create() From fb5aba9ba69318b90057f6bfad434a2d4459bae2 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 19 Jun 2013 14:57:33 -0700 Subject: [PATCH 0274/3726] adding support for instantiating container columns with column instances, as well as classes --- cqlengine/columns.py | 14 +++++++++---- .../tests/columns/test_container_columns.py | 21 +++++++++++++++++++ 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 0210e8dbb8..bc71e51ab4 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -424,15 +424,21 @@ def __init__(self, value_type, **kwargs): """ :param value_type: a column class indicating the types of the value """ - if not issubclass(value_type, Column): + inheritance_comparator = issubclass if isinstance(value_type, 
type) else isinstance + if not inheritance_comparator(value_type, Column): raise ValidationError('value_type must be a column class') - if issubclass(value_type, BaseContainerColumn): + if inheritance_comparator(value_type, BaseContainerColumn): raise ValidationError('container types cannot be nested') if value_type.db_type is None: raise ValidationError('value_type cannot be an abstract column type') - self.value_type = value_type - self.value_col = self.value_type() + if isinstance(value_type, type): + self.value_type = value_type + self.value_col = self.value_type() + else: + self.value_col = value_type + self.value_type = self.value_col.__class__ + super(BaseContainerColumn, self).__init__(**kwargs) def get_column_def(self): diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 3533aad3ef..989978ac2a 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -6,6 +6,27 @@ from cqlengine.management import create_table, delete_table from cqlengine.tests.base import BaseCassEngTestCase +class TestClassConstruction(BaseCassEngTestCase): + """ + Tests around the instantiation of container columns + """ + + def test_instantiation_with_column_class(self): + """ + Tests that columns instantiated with a column class work properly + and that the class is instantiated in the constructor + """ + column = columns.List(columns.Text) + assert isinstance(column.value_col, columns.Text) + + def test_instantiation_with_column_instance(self): + """ + Tests that columns instantiated with a column instance work properly + """ + column = columns.List(columns.Text(min_length=100)) + assert isinstance(column.value_col, columns.Text) + + class TestSetModel(Model): partition = columns.UUID(primary_key=True, default=uuid4) int_set = columns.Set(columns.Integer, required=False) From c81d01d0f765087bc6ed97d4f860f33492d920c9 Mon Sep 17 00:00:00 2001 From: Blake 
Eggleston Date: Wed, 19 Jun 2013 15:02:24 -0700 Subject: [PATCH 0275/3726] adding column instance support to map keys as well --- cqlengine/columns.py | 13 ++-- .../tests/columns/test_container_columns.py | 61 ++++++++++++++----- 2 files changed, 56 insertions(+), 18 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index bc71e51ab4..2c74ed1028 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -653,15 +653,20 @@ def __init__(self, key_type, value_type, **kwargs): :param key_type: a column class indicating the types of the key :param value_type: a column class indicating the types of the value """ - if not issubclass(value_type, Column): + inheritance_comparator = issubclass if isinstance(key_type, type) else isinstance + if not inheritance_comparator(key_type, Column): raise ValidationError('key_type must be a column class') - if issubclass(value_type, BaseContainerColumn): + if inheritance_comparator(key_type, BaseContainerColumn): raise ValidationError('container types cannot be nested') if key_type.db_type is None: raise ValidationError('key_type cannot be an abstract column type') - self.key_type = key_type - self.key_col = self.key_type() + if isinstance(key_type, type): + self.key_type = key_type + self.key_col = self.key_type() + else: + self.key_col = key_type + self.key_type = self.key_col.__class__ super(Map, self).__init__(value_type, **kwargs) def get_column_def(self): diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 989978ac2a..6ba261d907 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -11,20 +11,6 @@ class TestClassConstruction(BaseCassEngTestCase): Tests around the instantiation of container columns """ - def test_instantiation_with_column_class(self): - """ - Tests that columns instantiated with a column class work properly - and that the class is instantiated in the 
constructor - """ - column = columns.List(columns.Text) - assert isinstance(column.value_col, columns.Text) - - def test_instantiation_with_column_instance(self): - """ - Tests that columns instantiated with a column instance work properly - """ - column = columns.List(columns.Text(min_length=100)) - assert isinstance(column.value_col, columns.Text) class TestSetModel(Model): @@ -93,6 +79,21 @@ def test_partial_update_creation(self): assert len([s for s in statements if '"TEST" = "TEST" -' in s]) == 1 assert len([s for s in statements if '"TEST" = "TEST" +' in s]) == 1 + def test_instantiation_with_column_class(self): + """ + Tests that columns instantiated with a column class work properly + and that the class is instantiated in the constructor + """ + column = columns.Set(columns.Text) + assert isinstance(column.value_col, columns.Text) + + def test_instantiation_with_column_instance(self): + """ + Tests that columns instantiated with a column instance work properly + """ + column = columns.Set(columns.Text(min_length=100)) + assert isinstance(column.value_col, columns.Text) + class TestListModel(Model): partition = columns.UUID(primary_key=True, default=uuid4) int_list = columns.List(columns.Integer, required=False) @@ -164,6 +165,21 @@ def test_partial_update_creation(self): assert len([s for s in statements if '"TEST" = "TEST" +' in s]) == 1 assert len([s for s in statements if '+ "TEST"' in s]) == 1 + def test_instantiation_with_column_class(self): + """ + Tests that columns instantiated with a column class work properly + and that the class is instantiated in the constructor + """ + column = columns.List(columns.Text) + assert isinstance(column.value_col, columns.Text) + + def test_instantiation_with_column_instance(self): + """ + Tests that columns instantiated with a column instance work properly + """ + column = columns.List(columns.Text(min_length=100)) + assert isinstance(column.value_col, columns.Text) + class TestMapModel(Model): partition = 
columns.UUID(primary_key=True, default=uuid4) int_map = columns.Map(columns.Integer, columns.UUID, required=False) @@ -251,6 +267,23 @@ def test_updates_to_none(self): m2 = TestMapModel.get(partition=m.partition) assert m2.int_map is None + def test_instantiation_with_column_class(self): + """ + Tests that columns instantiated with a column class work properly + and that the class is instantiated in the constructor + """ + column = columns.Map(columns.Text, columns.Integer) + assert isinstance(column.key_col, columns.Text) + assert isinstance(column.value_col, columns.Integer) + + def test_instantiation_with_column_instance(self): + """ + Tests that columns instantiated with a column instance work properly + """ + column = columns.Map(columns.Text(min_length=100), columns.Integer()) + assert isinstance(column.key_col, columns.Text) + assert isinstance(column.value_col, columns.Integer) + # def test_partial_update_creation(self): # """ # Tests that proper update statements are created for a partial list update From d78a452087dc1bf4a960420949e14e78dc58ba5b Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 19 Jun 2013 15:03:10 -0700 Subject: [PATCH 0276/3726] removing now empty test class --- cqlengine/tests/columns/test_container_columns.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 6ba261d907..c27c03289d 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -6,18 +6,13 @@ from cqlengine.management import create_table, delete_table from cqlengine.tests.base import BaseCassEngTestCase -class TestClassConstruction(BaseCassEngTestCase): - """ - Tests around the instantiation of container columns - """ - - class TestSetModel(Model): partition = columns.UUID(primary_key=True, default=uuid4) int_set = columns.Set(columns.Integer, required=False) text_set = 
columns.Set(columns.Text, required=False) + class TestSetColumn(BaseCassEngTestCase): @classmethod @@ -94,11 +89,13 @@ def test_instantiation_with_column_instance(self): column = columns.Set(columns.Text(min_length=100)) assert isinstance(column.value_col, columns.Text) + class TestListModel(Model): partition = columns.UUID(primary_key=True, default=uuid4) int_list = columns.List(columns.Integer, required=False) text_list = columns.List(columns.Text, required=False) + class TestListColumn(BaseCassEngTestCase): @classmethod @@ -180,11 +177,13 @@ def test_instantiation_with_column_instance(self): column = columns.List(columns.Text(min_length=100)) assert isinstance(column.value_col, columns.Text) + class TestMapModel(Model): partition = columns.UUID(primary_key=True, default=uuid4) int_map = columns.Map(columns.Integer, columns.UUID, required=False) text_map = columns.Map(columns.Text, columns.DateTime, required=False) + class TestMapColumn(BaseCassEngTestCase): @classmethod From 01cf5bf0b00622f8849d9289920edacab4436e28 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 19 Jun 2013 15:04:24 -0700 Subject: [PATCH 0277/3726] version number bump --- cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- setup.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index df941b0fff..1bdc2ce5eb 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -3,5 +3,5 @@ from cqlengine.models import Model from cqlengine.query import BatchQuery -__version__ = '0.4.1' +__version__ = '0.4.2' diff --git a/docs/conf.py b/docs/conf.py index 5aff68540e..8dc1ccd58c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.4' +version = '0.4.2' # The full version, including alpha/beta/rc tags. -release = '0.4.1' +release = '0.4.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/setup.py b/setup.py index ff3dcddc4b..494f2d7947 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.4.1' +version = '0.4.2' long_desc = """ Cassandra CQL 3 Object Mapper for Python From 43b6eb3c8dca2d3d372ecd308ebe024b7777d707 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 20 Jun 2013 10:30:06 -0700 Subject: [PATCH 0278/3726] fixing text min length calculation to work with new required=False default --- cqlengine/columns.py | 2 +- cqlengine/tests/columns/test_validation.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 2c74ed1028..8f52e3f885 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -197,7 +197,7 @@ class Text(Column): db_type = 'text' def __init__(self, *args, **kwargs): - self.min_length = kwargs.pop('min_length', 1 if kwargs.get('required', True) else None) + self.min_length = kwargs.pop('min_length', 1 if kwargs.get('required', False) else None) self.max_length = kwargs.pop('max_length', None) super(Text, self).__init__(*args, **kwargs) diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index a68958bb23..46e8516364 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -194,6 +194,12 @@ def test_type_checking(self): with self.assertRaises(ValidationError): Text().validate(True) + def test_non_required_validation(self): + """ Tests that validation is ok on none and blank values if required is False """ + Text().validate('') + Text().validate(None) + + class TestExtraFieldsRaiseException(BaseCassEngTestCase): From 6587d383c27ab05a5c4a2bd0fda8e06e1beb2530 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 20 Jun 2013 10:41:57 -0700 Subject: [PATCH 0279/3726] 0.4.3 version bump --- changelog | 3 +++ cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- setup.py | 2 +- 4 
files changed, 7 insertions(+), 4 deletions(-) diff --git a/changelog b/changelog index 29a8b3f208..1c147a6706 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,8 @@ CHANGELOG +0.4.3 +* fixed bug with Text column validation + 0.4.2 * added support for instantiating container columns with column instances diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 1bdc2ce5eb..2ab73db5d1 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -3,5 +3,5 @@ from cqlengine.models import Model from cqlengine.query import BatchQuery -__version__ = '0.4.2' +__version__ = '0.4.3' diff --git a/docs/conf.py b/docs/conf.py index 8dc1ccd58c..3de8e4dd9f 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.4.2' +version = '0.4.3' # The full version, including alpha/beta/rc tags. -release = '0.4.2' +release = '0.4.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/setup.py b/setup.py index 494f2d7947..99fa7b3f69 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.4.2' +version = '0.4.3' long_desc = """ Cassandra CQL 3 Object Mapper for Python From 9634f47ea57c2122e9060ec6e94c445e1c941f45 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 20 Jun 2013 10:48:10 -0700 Subject: [PATCH 0280/3726] updating failing test --- cqlengine/tests/columns/test_validation.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 46e8516364..53cd738c02 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -158,9 +158,7 @@ class TestText(BaseCassEngTestCase): def test_min_length(self): #min len defaults to 1 col = Text() - - with self.assertRaises(ValidationError): - col.validate('') + col.validate('') col.validate('b') From 174aa4aa1b946f7958ac479821ed32879890f88f Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 21 Jun 2013 10:41:34 -0700 Subject: [PATCH 0281/3726] updated batch docs --- docs/topics/queryset.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index c237ed6b59..47fc4ef3e0 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -291,6 +291,15 @@ Batch Queries em3 = ExampleModel.batch(b).create(example_type=0, description="3", created_at=now) b.execute() + # updating in a batch + + b = BatchQuery() + em1.description = "new description" + em1.batch(b).save() + em2.description = "another new description" + em2.batch(b).save() + b.execute() + QuerySet method reference ========================= From 7d62430cc849d1f38a7069e518574f3ec04133ec Mon Sep 17 00:00:00 2001 From: Kai Lautaportti Date: Mon, 24 Jun 2013 10:49:24 +0300 Subject: [PATCH 0282/3726] Restore CQL query logging at DEBUG level. 
--- cqlengine/connection.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 5010aebc1a..4119d7f9bc 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -141,6 +141,7 @@ def execute(self, query, params): con = self.get() cur = con.cursor() cur.execute(query, params) + LOG.debug('{} {}'.format(query, repr(params))) self.put(con) return cur except cql.ProgrammingError as ex: From ad6d7b834ef228f522d2c80eee689ecac45eee93 Mon Sep 17 00:00:00 2001 From: Kai Lautaportti Date: Fri, 7 Jun 2013 23:17:31 +0300 Subject: [PATCH 0283/3726] Make ValueQuoter objects equal if their repr() are equal. This makes model instances which have quoted values to equal in an intuitive manner. --- cqlengine/columns.py | 5 +++++ cqlengine/tests/columns/test_value_io.py | 11 +++++++++++ 2 files changed, 16 insertions(+) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 8f52e3f885..8191c2abcc 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -65,6 +65,11 @@ def __str__(self): def __repr__(self): return self.__str__() + def __eq__(self, other): + if isinstance(other, self.__class__): + return repr(self) == repr(other) + return False + class Column(object): diff --git a/cqlengine/tests/columns/test_value_io.py b/cqlengine/tests/columns/test_value_io.py index fda632e965..64054279cc 100644 --- a/cqlengine/tests/columns/test_value_io.py +++ b/cqlengine/tests/columns/test_value_io.py @@ -7,7 +7,10 @@ from cqlengine.management import create_table from cqlengine.management import delete_table from cqlengine.models import Model +from cqlengine.columns import ValueQuoter from cqlengine import columns +import unittest + class BaseColumnIOTest(BaseCassEngTestCase): """ @@ -145,3 +148,11 @@ class TestDecimalIO(BaseColumnIOTest): def comparator_converter(self, val): return Decimal(val) + +class TestQuoter(unittest.TestCase): + + def test_equals(self): + assert ValueQuoter(False) == ValueQuoter(False) + assert 
ValueQuoter(1) == ValueQuoter(1) + assert ValueQuoter("foo") == ValueQuoter("foo") + assert ValueQuoter(1.55) == ValueQuoter(1.55) From ed46c0c5ab62f84a36e511326daacd88979174c3 Mon Sep 17 00:00:00 2001 From: Kai Lautaportti Date: Fri, 7 Jun 2013 19:18:07 +0300 Subject: [PATCH 0284/3726] Fixed a bug when NetworkTopologyStrategy is used. Although the Cassandra documentation implies that the `replication_factor` parameter would be ignored in this case its presence will cause an error when creating a keyspace using NetworkTopologyStrategy. --- cqlengine/management.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cqlengine/management.py b/cqlengine/management.py index b3504b919e..e8a692e0be 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -30,6 +30,12 @@ def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, } replication_map.update(replication_values) + if strategy_class.lower() != 'simplestrategy': + # Although the Cassandra documentation states for `replication_factor` + # that it is "Required if class is SimpleStrategy; otherwise, + # not used." we get an error if it is present. 
+ replication_map.pop('replication_factor', None) + query = """ CREATE KEYSPACE {} WITH REPLICATION = {} From 934c8b34f31d2b5d031def97ae9fa4c15c664240 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 25 Jun 2013 13:39:06 -0700 Subject: [PATCH 0285/3726] fixing bug that would cause failures when updating a model with an empty list --- cqlengine/columns.py | 10 ++++++ .../tests/columns/test_container_columns.py | 31 ++++++++++++++++--- 2 files changed, 37 insertions(+), 4 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 8f52e3f885..9c5199866f 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -585,10 +585,20 @@ def _insert(): if val is None or val == prev: return [] + elif prev is None: return _insert() + elif len(val) < len(prev): + # if elements have been removed, + # rewrite the whole list return _insert() + + elif len(prev) == 0: + # if we're updating from an empty + # list, do a complete insert + return _insert() + else: # the prepend and append lists, # if both of these are still None after looking diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index c27c03289d..03b152c3aa 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -146,10 +146,7 @@ def test_partial_updates(self): assert list(m2.int_list) == final def test_partial_update_creation(self): - """ - Tests that proper update statements are created for a partial list update - :return: - """ + """ Tests that proper update statements are created for a partial list update """ final = range(10) initial = final[3:7] @@ -162,6 +159,32 @@ def test_partial_update_creation(self): assert len([s for s in statements if '"TEST" = "TEST" +' in s]) == 1 assert len([s for s in statements if '+ "TEST"' in s]) == 1 + def test_update_from_none(self): + """ Tests that updating an 'None' list creates a straight insert statement """ + ctx = {} + 
col = columns.List(columns.Integer, db_field="TEST") + statements = col.get_update_statement([1, 2, 3], None, ctx) + + #only one variable /statement should be generated + assert len(ctx) == 1 + assert len(statements) == 1 + + assert str(ctx.values()[0]) == str([1, 2, 3]) + assert statements[0] == '"TEST" = :{}'.format(ctx.keys()[0]) + + def test_update_from_empty(self): + """ Tests that updating an empty list creates a straight insert statement """ + ctx = {} + col = columns.List(columns.Integer, db_field="TEST") + statements = col.get_update_statement([1, 2, 3], [], ctx) + + #only one variable /statement should be generated + assert len(ctx) == 1 + assert len(statements) == 1 + + assert str(ctx.values()[0]) == str([1,2,3]) + assert statements[0] == '"TEST" = :{}'.format(ctx.keys()[0]) + def test_instantiation_with_column_class(self): """ Tests that columns instantiated with a column class work properly From 27ba94a03a5df562eccd695dd235031d4d2490dc Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 25 Jun 2013 13:44:26 -0700 Subject: [PATCH 0286/3726] adding tests around updating empty and null sets --- .../tests/columns/test_container_columns.py | 30 +++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 03b152c3aa..e14adc6971 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -74,6 +74,32 @@ def test_partial_update_creation(self): assert len([s for s in statements if '"TEST" = "TEST" -' in s]) == 1 assert len([s for s in statements if '"TEST" = "TEST" +' in s]) == 1 + def test_update_from_none(self): + """ Tests that updating an 'None' list creates a straight insert statement """ + ctx = {} + col = columns.Set(columns.Integer, db_field="TEST") + statements = col.get_update_statement({1,2,3,4}, None, ctx) + + #only one variable /statement should be 
generated + assert len(ctx) == 1 + assert len(statements) == 1 + + assert ctx.values()[0].value == {1,2,3,4} + assert statements[0] == '"TEST" = :{}'.format(ctx.keys()[0]) + + def test_update_from_empty(self): + """ Tests that updating an empty list creates a straight insert statement """ + ctx = {} + col = columns.Set(columns.Integer, db_field="TEST") + statements = col.get_update_statement({1,2,3,4}, set(), ctx) + + #only one variable /statement should be generated + assert len(ctx) == 1 + assert len(statements) == 1 + + assert ctx.values()[0].value == {1,2,3,4} + assert statements[0] == '"TEST" = :{}'.format(ctx.keys()[0]) + def test_instantiation_with_column_class(self): """ Tests that columns instantiated with a column class work properly @@ -169,7 +195,7 @@ def test_update_from_none(self): assert len(ctx) == 1 assert len(statements) == 1 - assert str(ctx.values()[0]) == str([1, 2, 3]) + assert ctx.values()[0].value == [1, 2, 3] assert statements[0] == '"TEST" = :{}'.format(ctx.keys()[0]) def test_update_from_empty(self): @@ -182,7 +208,7 @@ def test_update_from_empty(self): assert len(ctx) == 1 assert len(statements) == 1 - assert str(ctx.values()[0]) == str([1,2,3]) + assert ctx.values()[0].value == [1,2,3] assert statements[0] == '"TEST" = :{}'.format(ctx.keys()[0]) def test_instantiation_with_column_class(self): From 1aacad2673f5848bf9da0c955d4da343b705e06d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 25 Jun 2013 13:45:14 -0700 Subject: [PATCH 0287/3726] reformatting --- .../tests/columns/test_container_columns.py | 65 +++++++++---------- 1 file changed, 31 insertions(+), 34 deletions(-) diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index e14adc6971..227502b12a 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -8,13 +8,12 @@ class TestSetModel(Model): - partition = columns.UUID(primary_key=True, 
default=uuid4) - int_set = columns.Set(columns.Integer, required=False) - text_set = columns.Set(columns.Text, required=False) + partition = columns.UUID(primary_key=True, default=uuid4) + int_set = columns.Set(columns.Integer, required=False) + text_set = columns.Set(columns.Text, required=False) class TestSetColumn(BaseCassEngTestCase): - @classmethod def setUpClass(cls): super(TestSetColumn, cls).setUpClass() @@ -28,7 +27,7 @@ def tearDownClass(cls): def test_io_success(self): """ Tests that a basic usage works as expected """ - m1 = TestSetModel.create(int_set={1,2}, text_set={'kai', 'andreas'}) + m1 = TestSetModel.create(int_set={1, 2}, text_set={'kai', 'andreas'}) m2 = TestSetModel.get(partition=m1.partition) assert isinstance(m2.int_set, set) @@ -49,16 +48,16 @@ def test_type_validation(self): def test_partial_updates(self): """ Tests that partial udpates work as expected """ - m1 = TestSetModel.create(int_set={1,2,3,4}) + m1 = TestSetModel.create(int_set={1, 2, 3, 4}) m1.int_set.add(5) m1.int_set.remove(1) - assert m1.int_set == {2,3,4,5} + assert m1.int_set == {2, 3, 4, 5} m1.save() - m2 = TestSetModel.get(partition=m1.partition) - assert m2.int_set == {2,3,4,5} + m2 = TestSetModel.get(partition=m1.partition) + assert m2.int_set == {2, 3, 4, 5} def test_partial_update_creation(self): """ @@ -67,7 +66,7 @@ def test_partial_update_creation(self): """ ctx = {} col = columns.Set(columns.Integer, db_field="TEST") - statements = col.get_update_statement({1,2,3,4}, {2,3,4,5}, ctx) + statements = col.get_update_statement({1, 2, 3, 4}, {2, 3, 4, 5}, ctx) assert len([v for v in ctx.values() if {1} == v.value]) == 1 assert len([v for v in ctx.values() if {5} == v.value]) == 1 @@ -78,26 +77,26 @@ def test_update_from_none(self): """ Tests that updating an 'None' list creates a straight insert statement """ ctx = {} col = columns.Set(columns.Integer, db_field="TEST") - statements = col.get_update_statement({1,2,3,4}, None, ctx) + statements = 
col.get_update_statement({1, 2, 3, 4}, None, ctx) #only one variable /statement should be generated assert len(ctx) == 1 assert len(statements) == 1 - assert ctx.values()[0].value == {1,2,3,4} + assert ctx.values()[0].value == {1, 2, 3, 4} assert statements[0] == '"TEST" = :{}'.format(ctx.keys()[0]) def test_update_from_empty(self): """ Tests that updating an empty list creates a straight insert statement """ ctx = {} col = columns.Set(columns.Integer, db_field="TEST") - statements = col.get_update_statement({1,2,3,4}, set(), ctx) + statements = col.get_update_statement({1, 2, 3, 4}, set(), ctx) #only one variable /statement should be generated assert len(ctx) == 1 assert len(statements) == 1 - assert ctx.values()[0].value == {1,2,3,4} + assert ctx.values()[0].value == {1, 2, 3, 4} assert statements[0] == '"TEST" = :{}'.format(ctx.keys()[0]) def test_instantiation_with_column_class(self): @@ -117,13 +116,12 @@ def test_instantiation_with_column_instance(self): class TestListModel(Model): - partition = columns.UUID(primary_key=True, default=uuid4) - int_list = columns.List(columns.Integer, required=False) - text_list = columns.List(columns.Text, required=False) + partition = columns.UUID(primary_key=True, default=uuid4) + int_list = columns.List(columns.Integer, required=False) + text_list = columns.List(columns.Text, required=False) class TestListColumn(BaseCassEngTestCase): - @classmethod def setUpClass(cls): super(TestListColumn, cls).setUpClass() @@ -137,7 +135,7 @@ def tearDownClass(cls): def test_io_success(self): """ Tests that a basic usage works as expected """ - m1 = TestListModel.create(int_list=[1,2], text_list=['kai', 'andreas']) + m1 = TestListModel.create(int_list=[1, 2], text_list=['kai', 'andreas']) m2 = TestListModel.get(partition=m1.partition) assert isinstance(m2.int_list, list) @@ -168,7 +166,7 @@ def test_partial_updates(self): m1.int_list = final m1.save() - m2 = TestListModel.get(partition=m1.partition) + m2 = 
TestListModel.get(partition=m1.partition) assert list(m2.int_list) == final def test_partial_update_creation(self): @@ -180,8 +178,8 @@ def test_partial_update_creation(self): col = columns.List(columns.Integer, db_field="TEST") statements = col.get_update_statement(final, initial, ctx) - assert len([v for v in ctx.values() if [2,1,0] == v.value]) == 1 - assert len([v for v in ctx.values() if [7,8,9] == v.value]) == 1 + assert len([v for v in ctx.values() if [2, 1, 0] == v.value]) == 1 + assert len([v for v in ctx.values() if [7, 8, 9] == v.value]) == 1 assert len([s for s in statements if '"TEST" = "TEST" +' in s]) == 1 assert len([s for s in statements if '+ "TEST"' in s]) == 1 @@ -208,7 +206,7 @@ def test_update_from_empty(self): assert len(ctx) == 1 assert len(statements) == 1 - assert ctx.values()[0].value == [1,2,3] + assert ctx.values()[0].value == [1, 2, 3] assert statements[0] == '"TEST" = :{}'.format(ctx.keys()[0]) def test_instantiation_with_column_class(self): @@ -228,13 +226,12 @@ def test_instantiation_with_column_instance(self): class TestMapModel(Model): - partition = columns.UUID(primary_key=True, default=uuid4) - int_map = columns.Map(columns.Integer, columns.UUID, required=False) - text_map = columns.Map(columns.Text, columns.DateTime, required=False) + partition = columns.UUID(primary_key=True, default=uuid4) + int_map = columns.Map(columns.Integer, columns.UUID, required=False) + text_map = columns.Map(columns.Text, columns.DateTime, required=False) class TestMapColumn(BaseCassEngTestCase): - @classmethod def setUpClass(cls): super(TestMapColumn, cls).setUpClass() @@ -252,7 +249,7 @@ def test_io_success(self): k2 = uuid4() now = datetime.now() then = now + timedelta(days=1) - m1 = TestMapModel.create(int_map={1:k1,2:k2}, text_map={'now':now, 'then':then}) + m1 = TestMapModel.create(int_map={1: k1, 2: k2}, text_map={'now': now, 'then': then}) m2 = TestMapModel.get(partition=m1.partition) assert isinstance(m2.int_map, dict) @@ -273,7 +270,7 @@ 
def test_type_validation(self): Tests that attempting to use the wrong types will raise an exception """ with self.assertRaises(ValidationError): - TestMapModel.create(int_map={'key':2,uuid4():'val'}, text_map={2:5}) + TestMapModel.create(int_map={'key': 2, uuid4(): 'val'}, text_map={2: 5}) def test_partial_updates(self): """ Tests that partial udpates work as expected """ @@ -284,21 +281,21 @@ def test_partial_updates(self): earlier = early - timedelta(minutes=30) later = now + timedelta(minutes=30) - initial = {'now':now, 'early':earlier} - final = {'later':later, 'early':early} + initial = {'now': now, 'early': earlier} + final = {'later': later, 'early': early} m1 = TestMapModel.create(text_map=initial) m1.text_map = final m1.save() - m2 = TestMapModel.get(partition=m1.partition) + m2 = TestMapModel.get(partition=m1.partition) assert m2.text_map == final def test_updates_from_none(self): """ Tests that updates from None work as expected """ m = TestMapModel.create(int_map=None) - expected = {1:uuid4()} + expected = {1: uuid4()} m.int_map = expected m.save() @@ -308,7 +305,7 @@ def test_updates_from_none(self): def test_updates_to_none(self): """ Tests that setting the field to None works as expected """ - m = TestMapModel.create(int_map={1:uuid4()}) + m = TestMapModel.create(int_map={1: uuid4()}) m.int_map = None m.save() From f65b39ee2b6a679f1360498d9e7e07562b4d6162 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 25 Jun 2013 13:48:21 -0700 Subject: [PATCH 0288/3726] changelog update and version bump --- changelog | 4 ++++ cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- setup.py | 2 +- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/changelog b/changelog index 1c147a6706..5c70db60b6 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,9 @@ CHANGELOG +0.4.4 +* adding query logging back +* fixed bug updating an empty list column + 0.4.3 * fixed bug with Text column validation diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 
2ab73db5d1..98c57ebebd 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -3,5 +3,5 @@ from cqlengine.models import Model from cqlengine.query import BatchQuery -__version__ = '0.4.3' +__version__ = '0.4.4' diff --git a/docs/conf.py b/docs/conf.py index 3de8e4dd9f..8bbd4c3575 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.4.3' +version = '0.4.4' # The full version, including alpha/beta/rc tags. -release = '0.4.3' +release = '0.4.4' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index 99fa7b3f69..bedf371d88 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.4.3' +version = '0.4.4' long_desc = """ Cassandra CQL 3 Object Mapper for Python From 4b97781a7bdd15d172adb8231d41f5412ff33c61 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 25 Jun 2013 15:00:44 -0700 Subject: [PATCH 0289/3726] adding to_python call to value column in container column to_python methods --- cqlengine/columns.py | 9 +++- .../tests/columns/test_container_columns.py | 41 +++++++++++++++++++ 2 files changed, 48 insertions(+), 2 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 9c5199866f..a4dc26d09f 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -489,6 +489,10 @@ def validate(self, value): return {self.value_col.validate(v) for v in val} + def to_python(self, value): + if value is None: return None + return {self.value_col.to_python(v) for v in value} + def to_database(self, value): if value is None: return None if isinstance(value, self.Quoter): return value @@ -560,7 +564,7 @@ def validate(self, value): def to_python(self, value): if value is None: return None - return list(value) + return [self.value_col.to_python(v) for v in value] def to_database(self, value): if value is None: return 
None @@ -643,6 +647,7 @@ def _insert(): return statements + class Map(BaseContainerColumn): """ Stores a key -> value map (dictionary) @@ -698,7 +703,7 @@ def validate(self, value): def to_python(self, value): if value is not None: - return {self.key_col.to_python(k):self.value_col.to_python(v) for k,v in value.items()} + return {self.key_col.to_python(k): self.value_col.to_python(v) for k,v in value.items()} def to_database(self, value): if value is None: return None diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 227502b12a..2134764dde 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -1,4 +1,5 @@ from datetime import datetime, timedelta +import json from uuid import uuid4 from cqlengine import Model, ValidationError @@ -13,6 +14,20 @@ class TestSetModel(Model): text_set = columns.Set(columns.Text, required=False) +class JsonTestColumn(columns.Column): + db_type = 'text' + + def to_python(self, value): + if value is None: return + if isinstance(value, basestring): + return json.loads(value) + else: + return value + + def to_database(self, value): + if value is None: return + return json.dumps(value) + class TestSetColumn(BaseCassEngTestCase): @classmethod def setUpClass(cls): @@ -114,6 +129,15 @@ def test_instantiation_with_column_instance(self): column = columns.Set(columns.Text(min_length=100)) assert isinstance(column.value_col, columns.Text) + def test_to_python(self): + """ Tests that to_python of value column is called """ + column = columns.Set(JsonTestColumn) + val = {1, 2, 3} + db_val = column.to_database(val) + assert db_val.value == {json.dumps(v) for v in val} + py_val = column.to_python(db_val.value) + assert py_val == val + class TestListModel(Model): partition = columns.UUID(primary_key=True, default=uuid4) @@ -224,6 +248,14 @@ def test_instantiation_with_column_instance(self): column = 
columns.List(columns.Text(min_length=100)) assert isinstance(column.value_col, columns.Text) + def test_to_python(self): + """ Tests that to_python of value column is called """ + column = columns.List(JsonTestColumn) + val = [1, 2, 3] + db_val = column.to_database(val) + assert db_val.value == [json.dumps(v) for v in val] + py_val = column.to_python(db_val.value) + assert py_val == val class TestMapModel(Model): partition = columns.UUID(primary_key=True, default=uuid4) @@ -329,6 +361,15 @@ def test_instantiation_with_column_instance(self): assert isinstance(column.key_col, columns.Text) assert isinstance(column.value_col, columns.Integer) + def test_to_python(self): + """ Tests that to_python of value column is called """ + column = columns.Map(JsonTestColumn, JsonTestColumn) + val = {1: 2, 3: 4, 5: 6} + db_val = column.to_database(val) + assert db_val.value == {json.dumps(k):json.dumps(v) for k,v in val.items()} + py_val = column.to_python(db_val.value) + assert py_val == val + # def test_partial_update_creation(self): # """ # Tests that proper update statements are created for a partial list update From 479224df322104776dda3c6f586f7339a8747ffe Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 25 Jun 2013 15:02:33 -0700 Subject: [PATCH 0290/3726] updating changelog and version --- changelog | 3 +++ cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- setup.py | 2 +- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/changelog b/changelog index 5c70db60b6..7a68e4f84b 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,8 @@ CHANGELOG +0.4.5 +* fixed bug where container columns would not call their child to_python method, this only really affected columns with special to_python logic + 0.4.4 * adding query logging back * fixed bug updating an empty list column diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 98c57ebebd..f246c6a70a 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -3,5 +3,5 @@ from cqlengine.models 
import Model from cqlengine.query import BatchQuery -__version__ = '0.4.4' +__version__ = '0.4.5' diff --git a/docs/conf.py b/docs/conf.py index 8bbd4c3575..cadd3038d6 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.4.4' +version = '0.4.5' # The full version, including alpha/beta/rc tags. -release = '0.4.4' +release = '0.4.5' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index bedf371d88..326913033d 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.4.4' +version = '0.4.5' long_desc = """ Cassandra CQL 3 Object Mapper for Python From 044e9fb933a6e8f0af5b7dc583719ed6f6140f3f Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 25 Jun 2013 15:30:48 -0700 Subject: [PATCH 0291/3726] updating value quoter equal function to compare values not repr --- cqlengine/columns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 0c4e62467b..fe205ae884 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -67,7 +67,7 @@ def __repr__(self): def __eq__(self, other): if isinstance(other, self.__class__): - return repr(self) == repr(other) + return self.value == other.value return False From e722b505464e3f8c8a76fd564653aed97b69ebce Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 25 Jun 2013 15:34:35 -0700 Subject: [PATCH 0292/3726] updating equality operations to stop relying on the to_database dictionary conversion --- cqlengine/models.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index a0c41be1ce..609e055933 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -186,7 +186,21 @@ def _get_column(cls, name): return cls._columns[name] def __eq__(self, other): - 
return self.as_dict() == other.as_dict() + if self.__class__ != other.__class__: + return False + + # check attribute keys + keys = set(self._columns.keys()) + other_keys = set(self._columns.keys()) + if keys != other_keys: + return False + + # check that all of the attributes match + for key in other_keys: + if getattr(self, key, None) != getattr(other, key, None): + return False + + return True def __ne__(self, other): return not self.__eq__(other) From 7a4c7235006da305273b49c6a28fe83497b9dec1 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 25 Jun 2013 15:46:38 -0700 Subject: [PATCH 0293/3726] renaming as_dict to _as_dict --- cqlengine/models.py | 2 +- cqlengine/query.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 609e055933..8849324943 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -232,7 +232,7 @@ def validate(self): val = col.validate(getattr(self, name)) setattr(self, name, val) - def as_dict(self): + def _as_dict(self): """ Returns a map of column names to cleaned values """ values = self._dynamic_columns or {} for name, col in self._columns.items(): diff --git a/cqlengine/query.py b/cqlengine/query.py index b01e1a9ea3..b8b4926dea 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -794,7 +794,7 @@ def save(self): #organize data value_pairs = [] - values = self.instance.as_dict() + values = self.instance._as_dict() #get defined fields and their column names for name, col in self.model._columns.items(): From ed0bb798962205b699da5c0ddf4e830ad6afcb2a Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 25 Jun 2013 15:47:41 -0700 Subject: [PATCH 0294/3726] updating changelog and version --- changelog | 3 +++ cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- setup.py | 2 +- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/changelog b/changelog index 7a68e4f84b..35981af32e 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,8 @@ CHANGELOG 
+0.4.6 +* fixing the way models are compared + 0.4.5 * fixed bug where container columns would not call their child to_python method, this only really affected columns with special to_python logic diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index f246c6a70a..a07df1f2e7 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -3,5 +3,5 @@ from cqlengine.models import Model from cqlengine.query import BatchQuery -__version__ = '0.4.5' +__version__ = '0.4.6' diff --git a/docs/conf.py b/docs/conf.py index cadd3038d6..880cbeb40b 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.4.5' +version = '0.4.6' # The full version, including alpha/beta/rc tags. -release = '0.4.5' +release = '0.4.6' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index 326913033d..c00d3ab982 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.4.5' +version = '0.4.6' long_desc = """ Cassandra CQL 3 Object Mapper for Python From f90cfb046a7e727a160e4e331d799064d1588c06 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 27 Jun 2013 13:10:16 -0700 Subject: [PATCH 0295/3726] adding support for setting a query's batch object to None --- cqlengine/query.py | 24 +++++++++++------------ cqlengine/tests/query/test_batch_query.py | 21 +++++++++++++++++++- 2 files changed, 32 insertions(+), 13 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index b8b4926dea..d86b504bab 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -428,8 +428,8 @@ def batch(self, batch_obj): :param batch_obj: :return: """ - if not isinstance(batch_obj, BatchQuery): - raise CQLEngineException('batch_obj must be a BatchQuery instance') + if batch_obj is not None and not isinstance(batch_obj, BatchQuery): + raise CQLEngineException('batch_obj must 
be a BatchQuery instance or None') clone = copy.deepcopy(self) clone._batch = batch_obj return clone @@ -772,13 +772,13 @@ def __init__(self, model, instance=None, batch=None): self.model = model self.column_family_name = self.model.column_family_name() self.instance = instance - self.batch = batch + self._batch = batch pass def batch(self, batch_obj): - if not isinstance(batch_obj, BatchQuery): - raise CQLEngineException('batch_obj must be a BatchQuery instance') - self.batch = batch_obj + if batch_obj is not None and not isinstance(batch_obj, BatchQuery): + raise CQLEngineException('batch_obj must be a BatchQuery instance or None') + self._batch = batch_obj return self def save(self): @@ -852,8 +852,8 @@ def save(self): # skip query execution if it's empty # caused by pointless update queries if qs: - if self.batch: - self.batch.add_query(qs, query_values) + if self._batch: + self._batch.add_query(qs, query_values) else: execute(qs, query_values) @@ -885,8 +885,8 @@ def save(self): qs = ' '.join(qs) - if self.batch: - self.batch.add_query(qs, query_values) + if self._batch: + self._batch.add_query(qs, query_values) else: execute(qs, query_values) @@ -906,8 +906,8 @@ def delete(self): qs += [' AND '.join(where_statements)] qs = ' '.join(qs) - if self.batch: - self.batch.add_query(qs, field_values) + if self._batch: + self._batch.add_query(qs, field_values) else: execute(qs, field_values) diff --git a/cqlengine/tests/query/test_batch_query.py b/cqlengine/tests/query/test_batch_query.py index 5b31826a39..37e8d28d56 100644 --- a/cqlengine/tests/query/test_batch_query.py +++ b/cqlengine/tests/query/test_batch_query.py @@ -4,7 +4,7 @@ import random from cqlengine import Model, columns from cqlengine.management import delete_table, create_table -from cqlengine.query import BatchQuery +from cqlengine.query import BatchQuery, DMLQuery from cqlengine.tests.base import BaseCassEngTestCase class TestMultiKeyModel(Model): @@ -104,3 +104,22 @@ def 
test_bulk_delete_success_case(self): for m in TestMultiKeyModel.all(): m.delete() + def test_none_success_case(self): + """ Tests that passing None into the batch call clears any batch object """ + b = BatchQuery() + + q = TestMultiKeyModel.objects.batch(b) + assert q._batch == b + + q = q.batch(None) + assert q._batch is None + + def test_dml_none_success_case(self): + """ Tests that passing None into the batch call clears any batch object """ + b = BatchQuery() + + q = DMLQuery(TestMultiKeyModel, batch=b) + assert q._batch == b + + q.batch(None) + assert q._batch is None From 01e9747411452e889f2aebeda26c6c8ea2727930 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 27 Jun 2013 13:11:50 -0700 Subject: [PATCH 0296/3726] changelog and version update --- changelog | 3 +++ cqlengine/__init__.py | 2 +- docs/conf.py | 4 ++-- setup.py | 2 +- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/changelog b/changelog index 35981af32e..6d49d52889 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,8 @@ CHANGELOG +0.4.7 +* adding support for passing None into query batch methods to clear any batch objects + 0.4.6 * fixing the way models are compared diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index a07df1f2e7..d9763109b4 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -3,5 +3,5 @@ from cqlengine.models import Model from cqlengine.query import BatchQuery -__version__ = '0.4.6' +__version__ = '0.4.7' diff --git a/docs/conf.py b/docs/conf.py index 880cbeb40b..2b29bbfe6d 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -48,9 +48,9 @@ # built documents. # # The short X.Y version. -version = '0.4.6' +version = '0.4.7' # The full version, including alpha/beta/rc tags. -release = '0.4.6' +release = '0.4.7' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/setup.py b/setup.py index c00d3ab982..9865237f08 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.4.6' +version = '0.4.7' long_desc = """ Cassandra CQL 3 Object Mapper for Python From 75d20b69200e99f8ff209530c6c824505b2f93f1 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 27 Jun 2013 14:22:47 -0700 Subject: [PATCH 0297/3726] fixed empty sets, maps, lists --- cqlengine/columns.py | 15 +++++++++----- .../tests/columns/test_container_columns.py | 20 +++++++++++++++++++ 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index fe205ae884..f8d262dfe5 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -473,14 +473,15 @@ def __str__(self): cq = cql_quote return '{' + ', '.join([cq(v) for v in self.value]) + '}' - def __init__(self, value_type, strict=True, **kwargs): + def __init__(self, value_type, strict=True, default=set, **kwargs): """ :param value_type: a column class indicating the types of the value :param strict: sets whether non set values will be coerced to set type on validation, or raise a validation error, defaults to True """ self.strict = strict - super(Set, self).__init__(value_type, **kwargs) + + super(Set, self).__init__(value_type, default=default, **kwargs) def validate(self, value): val = super(Set, self).validate(value) @@ -495,11 +496,12 @@ def validate(self, value): return {self.value_col.validate(v) for v in val} def to_python(self, value): - if value is None: return None + if value is None: return set() return {self.value_col.to_python(v) for v in value} def to_database(self, value): if value is None: return None + if isinstance(value, self.Quoter): return value return self.Quoter({self.value_col.to_database(v) for v in value}) @@ -560,6 +562,9 @@ def __str__(self): cq = cql_quote return '[' + ', '.join([cq(v) for v in self.value]) + ']' + def __init__(self, value_type, default=set, 
**kwargs): + return super(List, self).__init__(value_type=value_type, default=default, **kwargs) + def validate(self, value): val = super(List, self).validate(value) if val is None: return @@ -668,7 +673,7 @@ def __str__(self): cq = cql_quote return '{' + ', '.join([cq(k) + ':' + cq(v) for k,v in self.value.items()]) + '}' - def __init__(self, key_type, value_type, **kwargs): + def __init__(self, key_type, value_type, default=dict, **kwargs): """ :param key_type: a column class indicating the types of the key :param value_type: a column class indicating the types of the value @@ -687,7 +692,7 @@ def __init__(self, key_type, value_type, **kwargs): else: self.key_col = key_type self.key_type = self.key_col.__class__ - super(Map, self).__init__(value_type, **kwargs) + super(Map, self).__init__(value_type, default=default, **kwargs) def get_column_def(self): """ diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 2134764dde..180378dec0 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -40,6 +40,16 @@ def tearDownClass(cls): super(TestSetColumn, cls).tearDownClass() delete_table(TestSetModel) + + def test_empty_set_initial(self): + """ + tests that sets are set() by default, should never be none + :return: + """ + m = TestSetModel.create() + m.int_set.add(5) + m.save() + def test_io_success(self): """ Tests that a basic usage works as expected """ m1 = TestSetModel.create(int_set={1, 2}, text_set={'kai', 'andreas'}) @@ -129,6 +139,8 @@ def test_instantiation_with_column_instance(self): column = columns.Set(columns.Text(min_length=100)) assert isinstance(column.value_col, columns.Text) + + def test_to_python(self): """ Tests that to_python of value column is called """ column = columns.Set(JsonTestColumn) @@ -157,6 +169,10 @@ def tearDownClass(cls): super(TestListColumn, cls).tearDownClass() delete_table(TestListModel) + def 
test_initial(self): + tmp = TestListModel.create() + tmp.int_list.append(1) + def test_io_success(self): """ Tests that a basic usage works as expected """ m1 = TestListModel.create(int_list=[1, 2], text_list=['kai', 'andreas']) @@ -275,6 +291,10 @@ def tearDownClass(cls): super(TestMapColumn, cls).tearDownClass() delete_table(TestMapModel) + def test_empty_default(self): + tmp = TestMapModel.create() + tmp.int_map['blah'] = 1 + def test_io_success(self): """ Tests that a basic usage works as expected """ k1 = uuid4() From 5cc63e67cfb8aec15ff1d0a4fc8240cafd64cd71 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 27 Jun 2013 14:58:32 -0700 Subject: [PATCH 0298/3726] CQL version file instead of manually updating --- cqlengine/VERSION | 1 + cqlengine/__init__.py | 4 +++- docs/conf.py | 7 +++++-- setup.py | 2 +- 4 files changed, 10 insertions(+), 4 deletions(-) create mode 100644 cqlengine/VERSION diff --git a/cqlengine/VERSION b/cqlengine/VERSION new file mode 100644 index 0000000000..cb498ab2c8 --- /dev/null +++ b/cqlengine/VERSION @@ -0,0 +1 @@ +0.4.8 diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index d9763109b4..bdcaa6e420 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -3,5 +3,7 @@ from cqlengine.models import Model from cqlengine.query import BatchQuery -__version__ = '0.4.7' +__cqlengine_version_path__ = os.path.realpath(__file__ + '/../VERSION') +__version__ = open(__cqlengine_version_path__, 'r').readline().strip() + diff --git a/docs/conf.py b/docs/conf.py index 2b29bbfe6d..847ef50dce 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -47,10 +47,13 @@ # |version| and |release|, also used in various other places throughout the # built documents. # +__cqlengine_version_path__ = os.path.realpath(__file__ + '/../VERSION') # The short X.Y version. -version = '0.4.7' +version = open(__cqlengine_version_path__, 'r').readline().strip() # The full version, including alpha/beta/rc tags. 
-release = '0.4.7' +release = version + + # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index 9865237f08..5fd441428a 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ #python setup.py register #python setup.py sdist upload -version = '0.4.7' +version = open('cqlengine/VERSION', 'r').readline().strip() long_desc = """ Cassandra CQL 3 Object Mapper for Python From 7ab977b59c540c2746cd1c2fe09e84b1b2016870 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 27 Jun 2013 15:01:15 -0700 Subject: [PATCH 0299/3726] fixed import --- cqlengine/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index bdcaa6e420..27ef399572 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,3 +1,5 @@ +import os + from cqlengine.columns import * from cqlengine.functions import * from cqlengine.models import Model From e2dafaa25caea0f137ad78bd4c33af8d65756ed5 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 27 Jun 2013 15:30:02 -0700 Subject: [PATCH 0300/3726] fixes for pulling data out of DB with empty sets, lists, dict --- cqlengine/columns.py | 4 +++- cqlengine/models.py | 3 ++- .../tests/columns/test_container_columns.py | 19 ++++++++++++++++++- 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index f8d262dfe5..3b7a22d06e 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -573,7 +573,7 @@ def validate(self, value): return [self.value_col.validate(v) for v in val] def to_python(self, value): - if value is None: return None + if value is None: return [] return [self.value_col.to_python(v) for v in value] def to_database(self, value): @@ -712,6 +712,8 @@ def validate(self, value): return {self.key_col.validate(k):self.value_col.validate(v) for k,v in val.items()} def to_python(self, value): + if value is None: + return {} if value is not None: 
return {self.key_col.to_python(k): self.value_col.to_python(v) for k,v in value.items()} diff --git a/cqlengine/models.py b/cqlengine/models.py index 8849324943..a2d9b6abce 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -149,7 +149,8 @@ def __init__(self, **values): for name, column in self._columns.items(): value = values.get(name, None) - if value is not None: value = column.to_python(value) + if value is not None or isinstance(column, columns.BaseContainerColumn): + value = column.to_python(value) value_mngr = column.value_manager(self, column, value) self._values[name] = value_mngr diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 180378dec0..6cf218299c 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -50,6 +50,11 @@ def test_empty_set_initial(self): m.int_set.add(5) m.save() + def test_empty_set_retrieval(self): + m = TestSetModel.create() + m2 = TestSetModel.get(partition=m.partition) + m2.int_set.add(3) + def test_io_success(self): """ Tests that a basic usage works as expected """ m1 = TestSetModel.create(int_set={1, 2}, text_set={'kai', 'andreas'}) @@ -173,6 +178,11 @@ def test_initial(self): tmp = TestListModel.create() tmp.int_list.append(1) + def test_initial(self): + tmp = TestListModel.create() + tmp2 = TestListModel.get(partition=tmp.partition) + tmp2.int_list.append(1) + def test_io_success(self): """ Tests that a basic usage works as expected """ m1 = TestListModel.create(int_list=[1, 2], text_list=['kai', 'andreas']) @@ -295,6 +305,13 @@ def test_empty_default(self): tmp = TestMapModel.create() tmp.int_map['blah'] = 1 + def test_empty_retrieve(self): + tmp = TestMapModel.create() + tmp2 = TestMapModel.get(partition=tmp.partition) + tmp2.int_map['blah'] = 1 + + + def test_io_success(self): """ Tests that a basic usage works as expected """ k1 = uuid4() @@ -362,7 +379,7 @@ def 
test_updates_to_none(self): m.save() m2 = TestMapModel.get(partition=m.partition) - assert m2.int_map is None + assert m2.int_map == {} def test_instantiation_with_column_class(self): """ From 62fdaa5542d80be291f1826f108ebac1a4b426bc Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 27 Jun 2013 15:30:52 -0700 Subject: [PATCH 0301/3726] version bump --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index cb498ab2c8..76914ddc02 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.4.8 +0.4.9 From db60c151817794dc11b3eb309ba5a22341b64eb2 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 27 Jun 2013 16:03:12 -0700 Subject: [PATCH 0302/3726] batch docs for deletion --- docs/topics/queryset.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index 9248e1acb7..cb1fdbb836 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -306,6 +306,12 @@ Batch Queries em2.batch(b).save() b.execute() + # deleting in a batch + b = BatchQuery() + ExampleModel.objects(id=some_id).batch(b).delete() + ExampleModel.objects(id=some_id2).batch(b).delete() + b.execute() + QuerySet method reference ========================= From cd7a36665f94ee0b03bac8d9048f8f1e6f5a70f1 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 30 Jun 2013 18:25:13 -0700 Subject: [PATCH 0303/3726] updating field placeholders to use uuid4 instead of uuid1 due to collision0 which appear in some Virtualbox systems fixes #82 --- cqlengine/query.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index d86b504bab..16f6e806b3 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -1,9 +1,6 @@ -from collections import namedtuple import copy from datetime import datetime -from hashlib import md5 -from time import time -from uuid import uuid1 +from uuid import uuid4 from 
cqlengine import BaseContainerColumn, BaseValueManager, Map, columns from cqlengine.connection import connection_pool, connection_manager, execute @@ -122,7 +119,7 @@ class EqualsOperator(QueryOperator): class IterableQueryValue(QueryValue): def __init__(self, value): try: - super(IterableQueryValue, self).__init__(value, [uuid1().hex for i in value]) + super(IterableQueryValue, self).__init__(value, [uuid4().hex for i in value]) except TypeError: raise QueryException("in operator arguments must be iterable, {} found".format(value)) @@ -804,7 +801,7 @@ def save(self): #construct query string field_names = zip(*value_pairs)[0] - field_ids = {n:uuid1().hex for n in field_names} + field_ids = {n:uuid4().hex for n in field_names} field_values = dict(value_pairs) query_values = {field_ids[n]:field_values[n] for n in field_names} @@ -878,7 +875,7 @@ def save(self): qs += ['WHERE'] where_statements = [] for name, col in self.model._primary_keys.items(): - field_id = uuid1().hex + field_id = uuid4().hex query_values[field_id] = field_values[name] where_statements += ['"{}" = :{}'.format(col.db_field_name, field_id)] qs += [' AND '.join(where_statements)] @@ -899,7 +896,7 @@ def delete(self): qs += ['WHERE'] where_statements = [] for name, col in self.model._primary_keys.items(): - field_id = uuid1().hex + field_id = uuid4().hex field_values[field_id] = col.to_database(getattr(self.instance, name)) where_statements += ['"{}" = :{}'.format(col.db_field_name, field_id)] From 49acd4b271bc9510201c518904e4b14dec527865 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 30 Jun 2013 18:27:33 -0700 Subject: [PATCH 0304/3726] updating changelog and version --- changelog | 3 +++ cqlengine/VERSION | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/changelog b/changelog index 6d49d52889..d090b319c6 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,8 @@ CHANGELOG +0.4.10 +* changing query parameter placeholders from uuid1 to uuid4 + 0.4.7 * adding support for 
passing None into query batch methods to clear any batch objects diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 76914ddc02..e8423da873 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.4.9 +0.4.10 From d417e09eb508e1b81133a0b28cbac9c4715a9fde Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 1 Jul 2013 16:01:32 -0700 Subject: [PATCH 0305/3726] modifying execute function to just return the connection pool Conflicts: cqlengine/connection.py --- cqlengine/connection.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 4119d7f9bc..03810c57ee 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -156,7 +156,8 @@ def execute(query, params={}): @contextmanager def connection_manager(): + """ :rtype: ConnectionPool """ global connection_pool - tmp = connection_pool.get() - yield tmp - connection_pool.put(tmp) + # tmp = connection_pool.get() + yield connection_pool + # connection_pool.put(tmp) From 85d75512537f3154d45c01a666d68885baf90374 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 1 Jul 2013 16:02:56 -0700 Subject: [PATCH 0306/3726] updating row serialization Conflicts: cqlengine/connection.py cqlengine/query.py --- cqlengine/connection.py | 11 +++++++++++ cqlengine/management.py | 2 ++ cqlengine/query.py | 16 +++++++--------- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 03810c57ee..cca31b118b 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -27,6 +27,17 @@ class CQLConnectionError(CQLEngineException): pass # global connection pool connection_pool = None + +class CQLConnectionError(CQLEngineException): pass + + +class RowResult(tuple): pass + + +def _column_tuple_factory(colnames, values): + return tuple(colnames), [RowResult(v) for v in values] + + def setup(hosts, username=None, password=None, max_connections=10, 
default_keyspace=None, consistency='ONE'): """ Records the hosts and connects to one of them diff --git a/cqlengine/management.py b/cqlengine/management.py index b3504b919e..dbe6a497ae 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -40,11 +40,13 @@ def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, execute(query) + def delete_keyspace(name): with connection_manager() as con: if name in [k.name for k in con.client.describe_keyspaces()]: execute("DROP KEYSPACE {}".format(name)) + def create_table(model, create_missing_keyspace=True): if model.__abstract__: diff --git a/cqlengine/query.py b/cqlengine/query.py index 16f6e806b3..4cf1a86711 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -3,7 +3,7 @@ from uuid import uuid4 from cqlengine import BaseContainerColumn, BaseValueManager, Map, columns -from cqlengine.connection import connection_pool, connection_manager, execute +from cqlengine.connection import connection_manager, execute, RowResult from cqlengine.exceptions import CQLEngineException from cqlengine.functions import QueryValue, Token @@ -353,11 +353,8 @@ def _execute_query(self): if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") if self._result_cache is None: - self._cur = execute(self._select_query(), self._where_values()) - self._result_cache = [None]*self._cur.rowcount - if self._cur.description: - names = [i[0] for i in self._cur.description] - self._construct_result = self._create_result_constructor(names) + columns, self._result_cache = execute(self._select_query(), self._where_values()) + self._construct_result = self._create_result_constructor(columns) def _fill_result_cache_to_idx(self, idx): self._execute_query() @@ -368,7 +365,9 @@ def _fill_result_cache_to_idx(self, idx): if qty < 1: return else: - for values in self._cur.fetchmany(qty): + start = max(0, self._result_idx) + stop = start + qty + for values in 
self._result_cache[start:stop]: self._result_idx += 1 self._result_cache[self._result_idx] = self._construct_result(values) @@ -382,7 +381,7 @@ def __iter__(self): for idx in range(len(self._result_cache)): instance = self._result_cache[idx] - if instance is None: + if isinstance(instance, RowResult): self._fill_result_cache_to_idx(idx) yield self._result_cache[idx] @@ -716,7 +715,6 @@ def _construct_instance(values): return instance return _construct_instance - columns = [model._columns[db_map[name]] for name in names] if self._flat_values_list: return (lambda values: columns[0].to_python(values[0])) else: From 0d1ae6e6ca28554e6305ff33ad7c4779683f5413 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 26 Jun 2013 08:04:15 -0700 Subject: [PATCH 0307/3726] fixing a problem with the result cache filler --- cqlengine/connection.py | 3 ++- cqlengine/query.py | 6 ++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index cca31b118b..8b3a8f0479 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -31,7 +31,8 @@ class CQLConnectionError(CQLEngineException): pass class CQLConnectionError(CQLEngineException): pass -class RowResult(tuple): pass +class RowResult(tuple): + pass def _column_tuple_factory(colnames, values): diff --git a/cqlengine/query.py b/cqlengine/query.py index 4cf1a86711..ea81376f7c 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -365,11 +365,9 @@ def _fill_result_cache_to_idx(self, idx): if qty < 1: return else: - start = max(0, self._result_idx) - stop = start + qty - for values in self._result_cache[start:stop]: + for idx in range(qty): self._result_idx += 1 - self._result_cache[self._result_idx] = self._construct_result(values) + self._result_cache[self._result_idx] = self._construct_result(self._result_cache[self._result_idx]) #return the connection to the connection pool if we have all objects if self._result_cache and self._result_idx == 
(len(self._result_cache) - 1): From d3137dc321fa82aa848a968e406e12f51fdb8f52 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 1 Jul 2013 16:04:15 -0700 Subject: [PATCH 0308/3726] fixing count method Conflicts: cqlengine/query.py --- cqlengine/query.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index ea81376f7c..7431bd8cb9 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -1,6 +1,9 @@ import copy from datetime import datetime from uuid import uuid4 +from hashlib import md5 +from time import time +from uuid import uuid1 from cqlengine import BaseContainerColumn, BaseValueManager, Map, columns from cqlengine.connection import connection_manager, execute, RowResult @@ -545,8 +548,8 @@ def count(self): qs = ' '.join(qs) - cur = execute(qs, self._where_values()) - return cur.fetchone()[0] + result = execute(qs, self._where_values(), row_factory=decoder.tuple_factory) + return result[0][0] else: return len(self._result_cache) From 533d70b1be65ac5348720c24e7401871112397d4 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 25 Jun 2013 10:12:12 -0700 Subject: [PATCH 0309/3726] updating management module to work with the native driver --- cqlengine/management.py | 83 ++++++++++++++++++----------------------- 1 file changed, 37 insertions(+), 46 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index dbe6a497ae..23f32795c6 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -14,36 +14,30 @@ def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, :param **replication_values: 1.2 only, additional values to ad to the replication data map """ with connection_manager() as con: - #TODO: check system tables instead of using cql thrifteries - if not any([name == k.name for k in con.client.describe_keyspaces()]): - # if name not in [k.name for k in con.con.client.describe_keyspaces()]: - try: - #Try the 1.1 method - 
execute("""CREATE KEYSPACE {} - WITH strategy_class = '{}' - AND strategy_options:replication_factor={};""".format(name, strategy_class, replication_factor)) - except CQLEngineException: - #try the 1.2 method - replication_map = { - 'class': strategy_class, - 'replication_factor':replication_factor - } - replication_map.update(replication_values) - - query = """ - CREATE KEYSPACE {} - WITH REPLICATION = {} - """.format(name, json.dumps(replication_map).replace('"', "'")) - - if strategy_class != 'SimpleStrategy': - query += " AND DURABLE_WRITES = {}".format('true' if durable_writes else 'false') - - execute(query) + keyspaces = con.execute("""SELECT keyspace_name FROM system.schema_keyspaces""", {}) + if name not in [r['keyspace_name'] for r in keyspaces]: + #try the 1.2 method + replication_map = { + 'class': strategy_class, + 'replication_factor':replication_factor + } + replication_map.update(replication_values) + + query = """ + CREATE KEYSPACE {} + WITH REPLICATION = {} + """.format(name, json.dumps(replication_map).replace('"', "'")) + + if strategy_class != 'SimpleStrategy': + query += " AND DURABLE_WRITES = {}".format('true' if durable_writes else 'false') + + execute(query) def delete_keyspace(name): with connection_manager() as con: - if name in [k.name for k in con.client.describe_keyspaces()]: + keyspaces = con.execute("""SELECT keyspace_name FROM system.schema_keyspaces""", {}) + if name in [r['keyspace_name'] for r in keyspaces]: execute("DROP KEYSPACE {}".format(name)) @@ -56,16 +50,17 @@ def create_table(model, create_missing_keyspace=True): cf_name = model.column_family_name() raw_cf_name = model.column_family_name(include_keyspace=False) + ks_name = model._get_keyspace() #create missing keyspace if create_missing_keyspace: - create_keyspace(model._get_keyspace()) + create_keyspace(ks_name) with connection_manager() as con: - ks_info = con.client.describe_keyspace(model._get_keyspace()) + tables = con.execute("SELECT columnfamily_name from 
system.schema_columnfamilies WHERE keyspace_name = %s", [ks_name]) #check for an existing column family #TODO: check system tables instead of using cql thrifteries - if not any([raw_cf_name == cf.name for cf in ks_info.cf_defs]): + if raw_cf_name not in tables: qs = ['CREATE TABLE {}'.format(cf_name)] #add column types @@ -105,10 +100,9 @@ def add_column(col): #get existing index names, skip ones that already exist with connection_manager() as con: - ks_info = con.client.describe_keyspace(model._get_keyspace()) + idx_names = con.execute("SELECT index_name from system.\"IndexInfo\" WHERE table_name=%s", [raw_cf_name]) - cf_defs = [cf for cf in ks_info.cf_defs if cf.name == raw_cf_name] - idx_names = [i.index_name for i in cf_defs[0].column_metadata] if cf_defs else [] + idx_names = [i['index_name'] for i in idx_names] idx_names = filter(None, idx_names) indexes = [c for n,c in model._columns.items() if c.index] @@ -120,22 +114,19 @@ def add_column(col): qs += ['("{}")'.format(column.db_field_name)] qs = ' '.join(qs) - try: - execute(qs) - except CQLEngineException as ex: - # 1.2 doesn't return cf names, so we have to examine the exception - # and ignore if it says the index already exists - if "Index already exists" not in unicode(ex): - raise + execute(qs) def delete_table(model): - cf_name = model.column_family_name() - try: - execute('drop table {};'.format(cf_name)) - except CQLEngineException as ex: - #don't freak out if the table doesn't exist - if 'Cannot drop non existing column family' not in unicode(ex): - raise + # don't try to delete non existant tables + ks_name = model._get_keyspace() + with connection_manager() as con: + tables = con.execute("SELECT columnfamily_name from system.schema_columnfamilies WHERE keyspace_name = %s", [ks_name]) + raw_cf_name = model.column_family_name(include_keyspace=False) + if raw_cf_name not in [t['columnfamily_name'] for t in tables]: + return + + cf_name = model.column_family_name() + execute('drop table 
{};'.format(cf_name)) From 5c6e8601ec0b73b14db76ab713f1b5fb0d8e8614 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 1 Jul 2013 16:09:03 -0700 Subject: [PATCH 0310/3726] updating management functions to work with the cql driver --- cqlengine/management.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 23f32795c6..d66e808422 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -56,7 +56,10 @@ def create_table(model, create_missing_keyspace=True): create_keyspace(ks_name) with connection_manager() as con: - tables = con.execute("SELECT columnfamily_name from system.schema_columnfamilies WHERE keyspace_name = %s", [ks_name]) + tables = con.execute( + "SELECT columnfamily_name from system.schema_columnfamilies WHERE keyspace_name = :ks_name", + {'ks_name': ks_name} + ) #check for an existing column family #TODO: check system tables instead of using cql thrifteries @@ -100,7 +103,10 @@ def add_column(col): #get existing index names, skip ones that already exist with connection_manager() as con: - idx_names = con.execute("SELECT index_name from system.\"IndexInfo\" WHERE table_name=%s", [raw_cf_name]) + idx_names = con.execute( + "SELECT index_name from system.\"IndexInfo\" WHERE table_name=:table_name", + {'table_name': raw_cf_name} + ) idx_names = [i['index_name'] for i in idx_names] idx_names = filter(None, idx_names) @@ -122,7 +128,10 @@ def delete_table(model): # don't try to delete non existant tables ks_name = model._get_keyspace() with connection_manager() as con: - tables = con.execute("SELECT columnfamily_name from system.schema_columnfamilies WHERE keyspace_name = %s", [ks_name]) + tables = con.execute( + "SELECT columnfamily_name from system.schema_columnfamilies WHERE keyspace_name = :ks_name", + {'ks_name': ks_name} + ) raw_cf_name = model.column_family_name(include_keyspace=False) if raw_cf_name not in [t['columnfamily_name'] for t in 
tables]: return From 8aa4b04374539b95cbc4b744c77ea79d5f87704c Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 1 Jul 2013 16:18:38 -0700 Subject: [PATCH 0311/3726] updating connection to work with the cql driver --- cqlengine/connection.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 8b3a8f0479..588390b398 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -153,9 +153,11 @@ def execute(self, query, params): con = self.get() cur = con.cursor() cur.execute(query, params) + columns = [i[0] for i in cur.description or []] + results = [RowResult(r) for r in cur.fetchall()] LOG.debug('{} {}'.format(query, repr(params))) self.put(con) - return cur + return columns, results except cql.ProgrammingError as ex: raise CQLEngineException(unicode(ex)) except TTransportException: @@ -163,9 +165,11 @@ def execute(self, query, params): raise CQLEngineException("Could not execute query against the cluster") + def execute(query, params={}): return connection_pool.execute(query, params) + @contextmanager def connection_manager(): """ :rtype: ConnectionPool """ From 0590c5e0f6d7457bdafbff6f7f3a0122e938c804 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 1 Jul 2013 16:19:07 -0700 Subject: [PATCH 0312/3726] updating management to work with with the cql driver --- cqlengine/management.py | 12 ++++++------ cqlengine/query.py | 6 ------ 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index d66e808422..293e2ff4c7 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -14,8 +14,8 @@ def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, :param **replication_values: 1.2 only, additional values to ad to the replication data map """ with connection_manager() as con: - keyspaces = con.execute("""SELECT keyspace_name FROM system.schema_keyspaces""", {}) - if name not in 
[r['keyspace_name'] for r in keyspaces]: + _, keyspaces = con.execute("""SELECT keyspace_name FROM system.schema_keyspaces""", {}) + if name not in [r[0] for r in keyspaces]: #try the 1.2 method replication_map = { 'class': strategy_class, @@ -103,12 +103,12 @@ def add_column(col): #get existing index names, skip ones that already exist with connection_manager() as con: - idx_names = con.execute( + _, idx_names = con.execute( "SELECT index_name from system.\"IndexInfo\" WHERE table_name=:table_name", {'table_name': raw_cf_name} ) - idx_names = [i['index_name'] for i in idx_names] + idx_names = [i[0] for i in idx_names] idx_names = filter(None, idx_names) indexes = [c for n,c in model._columns.items() if c.index] @@ -128,12 +128,12 @@ def delete_table(model): # don't try to delete non existant tables ks_name = model._get_keyspace() with connection_manager() as con: - tables = con.execute( + _, tables = con.execute( "SELECT columnfamily_name from system.schema_columnfamilies WHERE keyspace_name = :ks_name", {'ks_name': ks_name} ) raw_cf_name = model.column_family_name(include_keyspace=False) - if raw_cf_name not in [t['columnfamily_name'] for t in tables]: + if raw_cf_name not in [t[0] for t in tables]: return cf_name = model.column_family_name() diff --git a/cqlengine/query.py b/cqlengine/query.py index 7431bd8cb9..b8b05f2536 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -306,12 +306,6 @@ def __len__(self): self._execute_query() return len(self._result_cache) - def __del__(self): - if self._con: - self._con.close() - self._con = None - self._cur = None - #----query generation / execution---- def _where_clause(self): From 92ef6fce17c766ee8d3a3b8390ed473ac31810c4 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 1 Jul 2013 16:26:02 -0700 Subject: [PATCH 0313/3726] fixing some problems with the eager result loading --- cqlengine/management.py | 10 +++++++--- cqlengine/query.py | 3 ++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git 
a/cqlengine/management.py b/cqlengine/management.py index 293e2ff4c7..46676fe31e 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -36,8 +36,8 @@ def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, def delete_keyspace(name): with connection_manager() as con: - keyspaces = con.execute("""SELECT keyspace_name FROM system.schema_keyspaces""", {}) - if name in [r['keyspace_name'] for r in keyspaces]: + _, keyspaces = con.execute("""SELECT keyspace_name FROM system.schema_keyspaces""", {}) + if name in [r[0] for r in keyspaces]: execute("DROP KEYSPACE {}".format(name)) @@ -120,7 +120,11 @@ def add_column(col): qs += ['("{}")'.format(column.db_field_name)] qs = ' '.join(qs) - execute(qs) + try: + execute(qs) + except CQLEngineException: + # index already exists + pass def delete_table(model): diff --git a/cqlengine/query.py b/cqlengine/query.py index b8b05f2536..a64a0fd8e8 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -542,7 +542,7 @@ def count(self): qs = ' '.join(qs) - result = execute(qs, self._where_values(), row_factory=decoder.tuple_factory) + _, result = execute(qs, self._where_values()) return result[0][0] else: return len(self._result_cache) @@ -710,6 +710,7 @@ def _construct_instance(values): return instance return _construct_instance + columns = [model._columns[n] for n in names] if self._flat_values_list: return (lambda values: columns[0].to_python(values[0])) else: From e9b055452af5c26b7e33e3fa006c2fbc09d69bc2 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 1 Jul 2013 17:39:21 -0700 Subject: [PATCH 0314/3726] wrapping query results in a named tuple --- cqlengine/connection.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 588390b398..97c96d4fab 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -34,6 +34,8 @@ class CQLConnectionError(CQLEngineException): pass class RowResult(tuple): pass 
+QueryResult = namedtuple('RowResult', ('columns', 'results')) + def _column_tuple_factory(colnames, values): return tuple(colnames), [RowResult(v) for v in values] @@ -157,7 +159,7 @@ def execute(self, query, params): results = [RowResult(r) for r in cur.fetchall()] LOG.debug('{} {}'.format(query, repr(params))) self.put(con) - return columns, results + return QueryResult(columns, results) except cql.ProgrammingError as ex: raise CQLEngineException(unicode(ex)) except TTransportException: From 779a960edb054dcc01f19c308d0fdfc0507e5fbe Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 1 Jul 2013 17:39:33 -0700 Subject: [PATCH 0315/3726] updating changelog and version --- changelog | 5 +++++ cqlengine/VERSION | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/changelog b/changelog index d090b319c6..69582a2cd9 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,10 @@ CHANGELOG +0.5 +* eagerly loading results into the query result cache, the cql driver does this anyway, + and pulling them from the cursor was causing some problems with gevented queries, + this will cause some breaking changes for users calling execute directly + 0.4.10 * changing query parameter placeholders from uuid1 to uuid4 diff --git a/cqlengine/VERSION b/cqlengine/VERSION index e8423da873..2eb3c4fe4e 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.4.10 +0.5 From 8091278d6ab4a427da6582f02a4feec8f9194eb4 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 5 Jul 2013 17:44:25 -0700 Subject: [PATCH 0316/3726] removing auto column from docs --- docs/topics/models.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index f599f2cfb7..16d95db76d 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -146,6 +146,3 @@ Model Attributes *Optional.* Sets the name of the keyspace used by this model. 
Defaulst to cqlengine -Automatic Primary Keys -====================== - CQL requires that all tables define at least one primary key. If a model definition does not include a primary key column, cqlengine will automatically add a uuid primary key column named ``id``. From 6ea6d178836291950ba9b39dd9a564f9fe7478e2 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 6 Jul 2013 08:28:40 -0700 Subject: [PATCH 0317/3726] fixing doc version --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 847ef50dce..37422f20a5 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -47,7 +47,7 @@ # |version| and |release|, also used in various other places throughout the # built documents. # -__cqlengine_version_path__ = os.path.realpath(__file__ + '/../VERSION') +__cqlengine_version_path__ = os.path.realpath(__file__ + '/../../cqlengine/VERSION') # The short X.Y version. version = open(__cqlengine_version_path__, 'r').readline().strip() # The full version, including alpha/beta/rc tags. From 714f1e0214620f9a491dd08508ee34cacea24b86 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 6 Jul 2013 08:28:50 -0700 Subject: [PATCH 0318/3726] updating columns docs --- docs/topics/columns.rst | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/docs/topics/columns.rst b/docs/topics/columns.rst index 987d7d7c4f..d323ac216a 100644 --- a/docs/topics/columns.rst +++ b/docs/topics/columns.rst @@ -46,18 +46,30 @@ Columns Stores a datetime value. - Python's datetime.now callable is set as the default value for this column :: - columns.DateTime() .. class:: UUID() Stores a type 1 or type 4 UUID. - Python's uuid.uuid4 callable is set as the default value for this column. :: - columns.UUID() +.. class:: TimeUUID() + + Stores a UUID value as the cql type 'timeuuid' :: + + columns.TimeUUID() + + .. 
classmethod:: from_datetime(dt) + + generates a TimeUUID for the given datetime + + :param dt: the datetime to create a time uuid from + :type dt: datetime.datetime + + :returns: a time uuid created from the given datetime + :rtype: uuid1 + .. class:: Boolean() Stores a boolean True or False value :: @@ -91,6 +103,7 @@ Collection Type Columns .. code-block:: python class Person(Model): + id = columns.UUID(primary_key=True, default=uuid.uuid4) first_name = columns.Text() last_name = columns.Text() From 2a779643bd0534360e1a194aca4480c360214582 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 6 Jul 2013 08:32:23 -0700 Subject: [PATCH 0319/3726] fixing the default value on the example partition key --- README.md | 2 +- docs/index.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 10531060e5..23516e78fe 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ from cqlengine.models import Model class ExampleModel(Model): read_repair_chance = 0.05 # optional - defaults to 0.1 - example_id = columns.UUID(primary_key=True) + example_id = columns.UUID(primary_key=True, default=uuid.uuid4) example_type = columns.Integer(index=True) created_at = columns.DateTime() description = columns.Text(required=False) diff --git a/docs/index.rst b/docs/index.rst index 9dff72fd56..2053e35934 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -36,7 +36,7 @@ Getting Started from cqlengine import Model class ExampleModel(Model): - example_id = columns.UUID(primary_key=True) + example_id = columns.UUID(primary_key=True, default=uuid.uuid4) example_type = columns.Integer(index=True) created_at = columns.DateTime() description = columns.Text(required=False) From 8e0cccf8f57c74808fbca6d7c2eecc0b682ff603 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 6 Jul 2013 08:34:49 -0700 Subject: [PATCH 0320/3726] removing other references to auto defaults and ids from the docs --- README.md | 2 -- docs/index.rst | 2 -- 
docs/topics/models.rst | 2 +- 3 files changed, 1 insertion(+), 5 deletions(-) diff --git a/README.md b/README.md index 23516e78fe..09e2014241 100644 --- a/README.md +++ b/README.md @@ -49,8 +49,6 @@ class ExampleModel(Model): >>> em6 = ExampleModel.create(example_type=1, description="example6", created_at=datetime.now()) >>> em7 = ExampleModel.create(example_type=1, description="example7", created_at=datetime.now()) >>> em8 = ExampleModel.create(example_type=1, description="example8", created_at=datetime.now()) -# Note: the UUID and DateTime columns will create uuid4 and datetime.now -# values automatically if we don't specify them when creating new rows #and now we can run some queries against our table >>> ExampleModel.objects.count() diff --git a/docs/index.rst b/docs/index.rst index 2053e35934..243c365313 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -58,8 +58,6 @@ Getting Started >>> em6 = ExampleModel.create(example_type=1, description="example6", created_at=datetime.now()) >>> em7 = ExampleModel.create(example_type=1, description="example7", created_at=datetime.now()) >>> em8 = ExampleModel.create(example_type=1, description="example8", created_at=datetime.now()) - # Note: the UUID and DateTime columns will create uuid4 and datetime.now - # values automatically if we don't specify them when creating new rows #and now we can run some queries against our table >>> ExampleModel.objects.count() diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 16d95db76d..3360ba7fee 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -41,7 +41,7 @@ The Person model would create this CQL table: Columns ======= - Columns in your models map to columns in your CQL table. You define CQL columns by defining column attributes on your model classes. For a model to be valid it needs at least one primary key column (defined automatically if you don't define one) and one non-primary key column. 
+ Columns in your models map to columns in your CQL table. You define CQL columns by defining column attributes on your model classes. For a model to be valid it needs at least one primary key column and one non-primary key column. Just as in CQL, the order you define your columns in is important, and is the same order they are defined in on a model's corresponding table. From 600c9175cd648cff965254b2c9aa317363851fa3 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 6 Jul 2013 08:45:51 -0700 Subject: [PATCH 0321/3726] updating description --- docs/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.rst b/docs/index.rst index 243c365313..1122599cca 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -9,7 +9,7 @@ cqlengine documentation .. _Breaking Changes: https://groups.google.com/forum/?fromgroups#!topic/cqlengine-users/erkSNe1JwuU -cqlengine is a Cassandra CQL 3 Object Mapper for Python with an interface similar to the Django orm and mongoengine +cqlengine is a Cassandra CQL 3 Object Mapper for Python :ref:`getting-started` From ce5da33e57f697922d430ec89e4d68a2c0295334 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 9 Jul 2013 21:25:07 -0700 Subject: [PATCH 0322/3726] added a retry until no servers can be contacted --- cqlengine/connection.py | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 97c96d4fab..dde5e3da6f 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -151,21 +151,23 @@ def _create_connection(self): raise CQLConnectionError("Could not connect to any server in cluster") def execute(self, query, params): - try: - con = self.get() - cur = con.cursor() - cur.execute(query, params) - columns = [i[0] for i in cur.description or []] - results = [RowResult(r) for r in cur.fetchall()] - LOG.debug('{} {}'.format(query, repr(params))) - self.put(con) - return QueryResult(columns, results) - 
except cql.ProgrammingError as ex: - raise CQLEngineException(unicode(ex)) - except TTransportException: - pass + while True: + try: + con = self.get() + cur = con.cursor() + cur.execute(query, params) + columns = [i[0] for i in cur.description or []] + results = [RowResult(r) for r in cur.fetchall()] + LOG.debug('{} {}'.format(query, repr(params))) + self.put(con) + return QueryResult(columns, results) + except CQLConnectionError as ex: + raise CQLEngineException("Could not execute query against the cluster") + except cql.ProgrammingError as ex: + raise CQLEngineException(unicode(ex)) + except TTransportException: + raise CQLEngineException("Could not execute query against the cluster") - raise CQLEngineException("Could not execute query against the cluster") def execute(query, params={}): From 21bb32c045ecd484986c324298483e3a5acfffbb Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 13 Jul 2013 11:57:52 -0700 Subject: [PATCH 0323/3726] quoting clustering order column, fixes #88 --- cqlengine/management.py | 4 ++-- cqlengine/tests/management/test_management.py | 22 +++++++++++++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 46676fe31e..be96b79f80 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -85,9 +85,9 @@ def add_column(col): with_qs = ['read_repair_chance = {}'.format(model.__read_repair_chance__)] - _order = ["%s %s" % (c.db_field_name, c.clustering_order or 'ASC') for c in model._clustering_keys.values()] + _order = ['"{}" {}'.format(c.db_field_name, c.clustering_order or 'ASC') for c in model._clustering_keys.values()] if _order: - with_qs.append("clustering order by ({})".format(', '.join(_order))) + with_qs.append('clustering order by ({})'.format(', '.join(_order))) # add read_repair_chance qs += ['WITH {}'.format(' AND '.join(with_qs))] diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py 
index 4e271c58ce..5e00bb977d 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -7,6 +7,8 @@ from mock import Mock, MagicMock, MagicProxy, patch from cqlengine import management from cqlengine.tests.query.test_queryset import TestModel +from cqlengine.models import Model +from cqlengine import columns from cql.thrifteries import ThriftConnection @@ -65,3 +67,23 @@ def test_multiple_deletes_dont_fail(self): delete_table(TestModel) delete_table(TestModel) + +class LowercaseKeyModel(Model): + first_key = columns.Integer(primary_key=True) + second_key = columns.Integer(primary_key=True) + some_data = columns.Text() + +class CapitalizedKeyModel(Model): + firstKey = columns.Integer(primary_key=True) + secondKey = columns.Integer(primary_key=True) + someData = columns.Text() + +class CapitalizedKeyTest(BaseCassEngTestCase): + + def test_table_definition(self): + """ Tests that creating a table with capitalized column names succeedso """ + create_table(LowercaseKeyModel) + create_table(CapitalizedKeyModel) + + delete_table(LowercaseKeyModel) + delete_table(CapitalizedKeyModel) From 019f1435746a073cd799870cbab7ae4fcf7585b0 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 13 Jul 2013 12:04:04 -0700 Subject: [PATCH 0324/3726] updating changelog and version# --- changelog | 4 ++++ cqlengine/VERSION | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/changelog b/changelog index 69582a2cd9..752a1eaa48 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,9 @@ CHANGELOG +0.5.1 +* improving connection pooling +* fixing bug with clustering order columns not being quoted + 0.5 * eagerly loading results into the query result cache, the cql driver does this anyway, and pulling them from the cursor was causing some problems with gevented queries, diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 2eb3c4fe4e..4b9fcbec10 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.5 +0.5.1 
From e11a0b0d7c93366a4875e2ef5b1b998941017014 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 13 Jul 2013 12:13:48 -0700 Subject: [PATCH 0325/3726] adding hex conversion to Bytes column --- cqlengine/columns.py | 5 +++++ cqlengine/tests/columns/test_value_io.py | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 3b7a22d06e..f82e468fe7 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -195,6 +195,11 @@ def get_cql(self): class Bytes(Column): db_type = 'blob' + def to_database(self, value): + val = super(Bytes, self).to_database(value) + if val is None: return + return val.encode('hex') + class Ascii(Column): db_type = 'ascii' diff --git a/cqlengine/tests/columns/test_value_io.py b/cqlengine/tests/columns/test_value_io.py index 64054279cc..1c822a9e5d 100644 --- a/cqlengine/tests/columns/test_value_io.py +++ b/cqlengine/tests/columns/test_value_io.py @@ -77,6 +77,12 @@ def test_column_io(self): #delete self._generated_model.filter(pkey=pkey).delete() +class TestBlobIO(BaseColumnIOTest): + + column = columns.Bytes + pkey_val = 'blake', uuid4().bytes + data_val = 'eggleston', uuid4().bytes + class TestTextIO(BaseColumnIOTest): column = columns.Text From 69c288019733a8dd3fad95c627803ead81b995a2 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 13 Jul 2013 12:14:15 -0700 Subject: [PATCH 0326/3726] updating version and changelog --- changelog | 3 +++ cqlengine/VERSION | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/changelog b/changelog index 752a1eaa48..0da8f3c7e9 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,8 @@ CHANGELOG +0.5.2 +* adding hex conversion to Bytes column + 0.5.1 * improving connection pooling * fixing bug with clustering order columns not being quoted diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 4b9fcbec10..cb0c939a93 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.5.1 +0.5.2 From 
49a6e16d27ba5eb6fc4fccd2259eba665505c764 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 23 Jul 2013 12:38:26 -0700 Subject: [PATCH 0327/3726] fixing a mutable kwarg --- cqlengine/connection.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index dde5e3da6f..b2d62023f3 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -169,11 +169,10 @@ def execute(self, query, params): raise CQLEngineException("Could not execute query against the cluster") - -def execute(query, params={}): +def execute(query, params=None): + params = params or {} return connection_pool.execute(query, params) - @contextmanager def connection_manager(): """ :rtype: ConnectionPool """ From c99903aa9aa8084d04d7d477263536b803d4a6c1 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 23 Jul 2013 12:48:06 -0700 Subject: [PATCH 0328/3726] adding counter column --- cqlengine/columns.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index f82e468fe7..cd01cf40ee 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -243,6 +243,24 @@ def to_database(self, value): return self.validate(value) +class Counter(Integer): + db_type = 'counter' + + def get_update_statement(self, val, prev, ctx): + val = self.to_database(val) + prev = self.to_database(prev or 0) + + delta = val - prev + if delta == 0: + return [] + + field_id = uuid4().hex + sign = '+' if delta > 0 else '-' + delta = abs(delta) + ctx[field_id] = delta + return ['"{0}" = "{0}" {1} {2}'.format(self.db_field_name, sign, delta)] + + class DateTime(Column): db_type = 'timestamp' @@ -363,7 +381,6 @@ def from_datetime(self, dt): clock_seq_hi_variant, clock_seq_low, node), version=1) - class Boolean(Column): db_type = 'boolean' @@ -419,11 +436,6 @@ def to_python(self, value): def to_database(self, value): return self.validate(value) -class 
Counter(Column): - #TODO: counter field - def __init__(self, **kwargs): - super(Counter, self).__init__(**kwargs) - raise NotImplementedError class BaseContainerColumn(Column): """ From ceb487af88507aaa0df82cefbbf528d63c5248de Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 23 Jul 2013 12:48:34 -0700 Subject: [PATCH 0329/3726] adding counter support to update statement generator --- cqlengine/query.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index a64a0fd8e8..4327ff1155 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -5,6 +5,7 @@ from time import time from uuid import uuid1 from cqlengine import BaseContainerColumn, BaseValueManager, Map, columns +from cqlengine.columns import Counter from cqlengine.connection import connection_manager, execute, RowResult @@ -809,8 +810,9 @@ def save(self): for name, col in self.model._columns.items(): if not col.is_primary_key: val = values.get(name) - if val is None: continue - if isinstance(col, BaseContainerColumn): + if val is None: + continue + if isinstance(col, (BaseContainerColumn, Counter)): #remove value from query values, the column will handle it query_values.pop(field_ids.get(name), None) From 17b77ea9e4b03a6caa4611db3966610dfc491a42 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 23 Jul 2013 12:53:00 -0700 Subject: [PATCH 0330/3726] adding support for using set statements for counter columns if there is no previous value, reformatting --- cqlengine/columns.py | 18 ++++++++++++++++-- .../tests/columns/test_container_columns.py | 3 ++- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index cd01cf40ee..0f505ccc02 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -192,6 +192,7 @@ def cql(self): def get_cql(self): return '"{}"'.format(self.db_field_name) + class Bytes(Column): db_type = 'blob' @@ -200,9 +201,11 @@ def to_database(self, value): if val 
is None: return return val.encode('hex') + class Ascii(Column): db_type = 'ascii' + class Text(Column): db_type = 'text' @@ -248,13 +251,19 @@ class Counter(Integer): def get_update_statement(self, val, prev, ctx): val = self.to_database(val) - prev = self.to_database(prev or 0) + prev = self.to_database(prev) + field_id = uuid4().hex + + # use a set statement if there is no + # previous value to compute a delta from + if prev is None: + ctx[field_id] = val + return ['"{}" = :{}'.format(self.db_field_name, field_id)] delta = val - prev if delta == 0: return [] - field_id = uuid4().hex sign = '+' if delta > 0 else '-' delta = abs(delta) ctx[field_id] = delta @@ -334,6 +343,7 @@ def to_database(self, value): from uuid import UUID as pyUUID, getnode + class TimeUUID(UUID): """ UUID containing timestamp @@ -417,6 +427,7 @@ def to_python(self, value): def to_database(self, value): return self.validate(value) + class Decimal(Column): db_type = 'decimal' @@ -476,6 +487,7 @@ def get_update_statement(self, val, prev, ctx): """ raise NotImplementedError + class Set(BaseContainerColumn): """ Stores a set of unordered, unique values @@ -565,6 +577,7 @@ def get_update_statement(self, val, prev, ctx): return statements + class List(BaseContainerColumn): """ Stores a list of ordered values @@ -789,6 +802,7 @@ def get_delete_statement(self, val, prev, ctx): return del_statements + class _PartitionKeysToken(Column): """ virtual column representing token of partition columns. 
diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 6cf218299c..cd11fb5abb 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -28,7 +28,9 @@ def to_database(self, value): if value is None: return return json.dumps(value) + class TestSetColumn(BaseCassEngTestCase): + @classmethod def setUpClass(cls): super(TestSetColumn, cls).setUpClass() @@ -40,7 +42,6 @@ def tearDownClass(cls): super(TestSetColumn, cls).tearDownClass() delete_table(TestSetModel) - def test_empty_set_initial(self): """ tests that sets are set() by default, should never be none From 19d6984e727d79af79c466b0c3842de7a4a46207 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 24 Jul 2013 09:49:15 -0700 Subject: [PATCH 0331/3726] updating the model and query classes to handle models with counter columns correctly --- cqlengine/columns.py | 26 +++++++++++++++----------- cqlengine/models.py | 10 ++++++++-- cqlengine/query.py | 9 +++++---- 3 files changed, 28 insertions(+), 17 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 0f505ccc02..56221c2dd1 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -249,22 +249,26 @@ def to_database(self, value): class Counter(Integer): db_type = 'counter' + def __init__(self, + index=False, + db_field=None, + required=False): + super(Counter, self).__init__( + primary_key=False, + partition_key=False, + index=index, + db_field=db_field, + default=0, + required=required, + ) + def get_update_statement(self, val, prev, ctx): val = self.to_database(val) - prev = self.to_database(prev) + prev = self.to_database(prev or 0) field_id = uuid4().hex - # use a set statement if there is no - # previous value to compute a delta from - if prev is None: - ctx[field_id] = val - return ['"{}" = :{}'.format(self.db_field_name, field_id)] - delta = val - prev - if delta == 0: - return [] - - sign = '+' if 
delta > 0 else '-' + sign = '-' if delta < 0 else '+' delta = abs(delta) ctx[field_id] = delta return ['"{0}" = "{0}" {1} {2}'.format(self.db_field_name, sign, delta)] diff --git a/cqlengine/models.py b/cqlengine/models.py index a2d9b6abce..640e023bd5 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -70,6 +70,7 @@ def __init__(self, column): def _get_column(self): return self.column + class ColumnDescriptor(object): """ Handles the reading and writing of column values to and from @@ -127,6 +128,7 @@ class BaseModel(object): """ class DoesNotExist(_DoesNotExist): pass + class MultipleObjectsReturned(_MultipleObjectsReturned): pass objects = QuerySetDescriptor() @@ -316,14 +318,17 @@ def _transform_column(col_name, col_obj): column_definitions = inherited_columns.items() + column_definitions - #columns defined on model, excludes automatically - #defined columns defined_columns = OrderedDict(column_definitions) #prepend primary key if one hasn't been defined if not is_abstract and not any([v.primary_key for k,v in column_definitions]): raise ModelDefinitionException("At least 1 primary key is required.") + counter_columns = [c for c in defined_columns.values() if isinstance(c, columns.Counter)] + data_columns = [c for c in defined_columns.values() if not c.primary_key and not isinstance(c, columns.Counter)] + if counter_columns and data_columns: + raise ModelDefinitionException('counter models may not have data columns') + has_partition_keys = any(v.partition_key for (k, v) in column_definitions) #TODO: check that the defined columns don't conflict with any of the Model API's existing attributes/methods @@ -382,6 +387,7 @@ def _transform_column(col_name, col_obj): attrs['_partition_keys'] = partition_keys attrs['_clustering_keys'] = clustering_keys + attrs['_has_counter'] = len(counter_columns) > 0 #setup class exceptions DoesNotExistBase = None diff --git a/cqlengine/query.py b/cqlengine/query.py index 4327ff1155..194b4c9330 100644 --- 
a/cqlengine/query.py +++ b/cqlengine/query.py @@ -801,7 +801,7 @@ def save(self): query_values = {field_ids[n]:field_values[n] for n in field_names} qs = [] - if self.instance._can_update(): + if self.instance._has_counter or self.instance._can_update(): qs += ["UPDATE {}".format(self.column_family_name)] qs += ["SET"] @@ -818,7 +818,7 @@ def save(self): val_mgr = self.instance._values[name] set_statements += col.get_update_statement(val, val_mgr.previous_value, query_values) - pass + else: set_statements += ['"{}" = :{}'.format(col.db_field_name, field_ids[col.db_field_name])] qs += [', '.join(set_statements)] @@ -831,8 +831,9 @@ def save(self): qs += [' AND '.join(where_statements)] - # clear the qs if there are not set statements - if not set_statements: qs = [] + # clear the qs if there are no set statements and this is not a counter model + if not set_statements and not self.instance._has_counter: + qs = [] else: qs += ["INSERT INTO {}".format(self.column_family_name)] From 3c24995481d29d145f99217290a2ca3f1cea4be6 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 24 Jul 2013 09:49:41 -0700 Subject: [PATCH 0332/3726] adding tests around counter columns --- .../tests/columns/test_counter_column.py | 64 +++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 cqlengine/tests/columns/test_counter_column.py diff --git a/cqlengine/tests/columns/test_counter_column.py b/cqlengine/tests/columns/test_counter_column.py new file mode 100644 index 0000000000..f92cb4f418 --- /dev/null +++ b/cqlengine/tests/columns/test_counter_column.py @@ -0,0 +1,64 @@ +from uuid import uuid4 + +from cqlengine import Model, ValidationError +from cqlengine import columns +from cqlengine.management import create_table, delete_table +from cqlengine.tests.base import BaseCassEngTestCase + + +class TestCounterModel(Model): + partition = columns.UUID(primary_key=True, default=uuid4) + cluster = columns.UUID(primary_key=True, default=uuid4) + counter = columns.Counter() + 
+ +class TestClassConstruction(BaseCassEngTestCase): + + def test_defining_a_non_counter_column_fails(self): + """ Tests that defining a non counter column field fails """ + + def test_defining_a_primary_key_counter_column_fails(self): + """ Tests that defining primary keys on counter columns fails """ + + +class TestCounterColumn(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestCounterColumn, cls).setUpClass() + delete_table(TestCounterModel) + create_table(TestCounterModel) + + @classmethod + def tearDownClass(cls): + super(TestCounterColumn, cls).tearDownClass() + delete_table(TestCounterModel) + + def test_updates(self): + """ Tests that counter updates work as intended """ + instance = TestCounterModel.create() + instance.counter += 5 + instance.save() + + actual = TestCounterModel.get(partition=instance.partition) + assert actual.counter == 5 + + def test_concurrent_updates(self): + """ Tests updates from multiple queries reaches the correct value """ + instance = TestCounterModel.create() + new1 = TestCounterModel.get(partition=instance.partition) + new2 = TestCounterModel.get(partition=instance.partition) + + new1.counter += 5 + new1.save() + new2.counter += 5 + new2.save() + + actual = TestCounterModel.get(partition=instance.partition) + assert actual.counter == 10 + + def test_update_from_none(self): + """ Tests that updating from None uses a create statement """ + + def test_multiple_inserts(self): + """ Tests inserting over existing data works as expected """ From 58223eab242d23f43cc67ee7a09ebe6ffbff184f Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 31 Jul 2013 12:35:49 -0700 Subject: [PATCH 0333/3726] Only adds new columns. Will not remove non existing. 
--- cqlengine/connection.py | 2 + cqlengine/management.py | 41 ++++++++++++++-- cqlengine/tests/management/test_management.py | 48 +++++++++++++++++-- 3 files changed, 85 insertions(+), 6 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index dde5e3da6f..12d37c8ea7 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -154,6 +154,8 @@ def execute(self, query, params): while True: try: con = self.get() + if not con: + raise CQLEngineException("Error calling execute without calling setup.") cur = con.cursor() cur.execute(query, params) columns = [i[0] for i in cur.description or []] diff --git a/cqlengine/management.py b/cqlengine/management.py index be96b79f80..ce72e778b9 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -3,6 +3,13 @@ from cqlengine.connection import connection_manager, execute from cqlengine.exceptions import CQLEngineException +import logging +from collections import namedtuple +Field = namedtuple('Field', ['name', 'type']) + +logger = logging.getLogger(__name__) + + def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, durable_writes=True, **replication_values): """ creates a keyspace @@ -60,6 +67,7 @@ def create_table(model, create_missing_keyspace=True): "SELECT columnfamily_name from system.schema_columnfamilies WHERE keyspace_name = :ks_name", {'ks_name': ks_name} ) + tables = [x[0] for x in tables.results] #check for an existing column family #TODO: check system tables instead of using cql thrifteries @@ -67,9 +75,9 @@ def create_table(model, create_missing_keyspace=True): qs = ['CREATE TABLE {}'.format(cf_name)] #add column types - pkeys = [] - ckeys = [] - qtypes = [] + pkeys = [] # primary keys + ckeys = [] # clustering keys + qtypes = [] # field types def add_column(col): s = col.get_column_def() if col.primary_key: @@ -100,6 +108,19 @@ def add_column(col): # and ignore if it says the column family already exists if "Cannot add already existing 
column family" not in unicode(ex): raise + else: + # see if we're missing any columns + fields = get_fields(model) + field_names = [x.name for x in fields] + for name, col in model._columns.items(): + if col.primary_key or col.partition_key: continue # we can't mess with the PK + if name in field_names: continue # skip columns already defined + + # add missing column using the column def + query = "ALTER TABLE {} add {}".format(cf_name, col.get_column_def()) + logger.debug(query) + execute(query) + #get existing index names, skip ones that already exist with connection_manager() as con: @@ -126,6 +147,20 @@ def add_column(col): # index already exists pass +def get_fields(model): + # returns all fields that aren't part of the PK + ks_name = model._get_keyspace() + col_family = model.column_family_name(include_keyspace=False) + + with connection_manager() as con: + query = "SELECT column_name, validator FROM system.schema_columns \ + WHERE keyspace_name = :ks_name AND columnfamily_name = :col_family" + + logger.debug("get_fields %s %s", ks_name, col_family) + + tmp = con.execute(query, {'ks_name':ks_name, 'col_family':col_family}) + return [Field(x[0], x[1]) for x in tmp.results] + # convert to Field named tuples def delete_table(model): diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 5e00bb977d..b99bd5a922 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -1,16 +1,15 @@ from cqlengine.exceptions import CQLEngineException -from cqlengine.management import create_table, delete_table +from cqlengine.management import create_table, delete_table, get_fields from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.connection import ConnectionPool, Host -from mock import Mock, MagicMock, MagicProxy, patch +from mock import MagicMock, patch from cqlengine import management from cqlengine.tests.query.test_queryset import TestModel from 
cqlengine.models import Model from cqlengine import columns -from cql.thrifteries import ThriftConnection class ConnectionPoolFailoverTestCase(BaseCassEngTestCase): """Test cassandra connection pooling.""" @@ -87,3 +86,46 @@ def test_table_definition(self): delete_table(LowercaseKeyModel) delete_table(CapitalizedKeyModel) + + +class FirstModel(Model): + __table_name__ = 'first_model' + first_key = columns.UUID(primary_key=True) + second_key = columns.UUID() + third_key = columns.Text() + +class SecondModel(Model): + __table_name__ = 'first_model' + first_key = columns.UUID(primary_key=True) + second_key = columns.UUID() + third_key = columns.Text() + fourth_key = columns.Text() + +class ThirdModel(Model): + __table_name__ = 'first_model' + first_key = columns.UUID(primary_key=True) + second_key = columns.UUID() + third_key = columns.Text() + # removed fourth key, but it should stay in the DB + blah = columns.Map(columns.Text, columns.Text) + +class AddColumnTest(BaseCassEngTestCase): + def setUp(self): + delete_table(FirstModel) + + def test_add_column(self): + create_table(FirstModel) + fields = get_fields(FirstModel) + + # this should contain the second key + self.assertEqual(len(fields), 2) + # get schema + create_table(SecondModel) + + fields = get_fields(FirstModel) + self.assertEqual(len(fields), 3) + + create_table(ThirdModel) + fields = get_fields(FirstModel) + self.assertEqual(len(fields), 4) + From 121c04d9758c38c63e5f6f91393d0f8127a26e49 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 31 Jul 2013 15:50:46 -0700 Subject: [PATCH 0334/3726] version bump --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index cb0c939a93..be14282b7f 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.5.2 +0.5.3 From 1b4430fa85b2b24d647d2c9380e6f6e3fba312fa Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 31 Jul 2013 16:56:03 -0700 Subject: [PATCH 0335/3726] fixed db field 
name issue --- cqlengine/management.py | 2 +- cqlengine/tests/management/test_management.py | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index ce72e778b9..a06a10fb97 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -114,7 +114,7 @@ def add_column(col): field_names = [x.name for x in fields] for name, col in model._columns.items(): if col.primary_key or col.partition_key: continue # we can't mess with the PK - if name in field_names: continue # skip columns already defined + if col.db_field_name in field_names: continue # skip columns already defined # add missing column using the column def query = "ALTER TABLE {} add {}".format(cf_name, col.get_column_def()) diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index b99bd5a922..9df6a31507 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -109,6 +109,14 @@ class ThirdModel(Model): # removed fourth key, but it should stay in the DB blah = columns.Map(columns.Text, columns.Text) +class FourthModel(Model): + __table_name__ = 'first_model' + first_key = columns.UUID(primary_key=True) + second_key = columns.UUID() + third_key = columns.Text() + # removed fourth key, but it should stay in the DB + renamed = columns.Map(columns.Text, columns.Text, db_field='blah') + class AddColumnTest(BaseCassEngTestCase): def setUp(self): delete_table(FirstModel) @@ -129,3 +137,7 @@ def test_add_column(self): fields = get_fields(FirstModel) self.assertEqual(len(fields), 4) + create_table(FourthModel) + fields = get_fields(FirstModel) + self.assertEqual(len(fields), 4) + From 368bfb642a7015bea8841d65554851f6957c8ca5 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 31 Jul 2013 16:57:13 -0700 Subject: [PATCH 0336/3726] fixed bug in alter --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/cqlengine/VERSION b/cqlengine/VERSION index be14282b7f..a918a2aa18 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.5.3 +0.6.0 From 3ee88ecf93ffaad8eb753021b1e811a27c8d513f Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 14 Aug 2013 15:48:24 -0700 Subject: [PATCH 0337/3726] setting counter column default instantiation value to 0 (not None) --- cqlengine/columns.py | 9 +++++++++ cqlengine/tests/columns/test_counter_column.py | 13 +++++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 56221c2dd1..9940f1aa18 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -246,9 +246,18 @@ def to_database(self, value): return self.validate(value) +class CounterValueManager(BaseValueManager): + def __init__(self, instance, column, value): + super(CounterValueManager, self).__init__(instance, column, value) + self.value = self.value or 0 + self.previous_value = self.previous_value or 0 + + class Counter(Integer): db_type = 'counter' + value_manager = CounterValueManager + def __init__(self, index=False, db_field=None, diff --git a/cqlengine/tests/columns/test_counter_column.py b/cqlengine/tests/columns/test_counter_column.py index f92cb4f418..f8dc0096e1 100644 --- a/cqlengine/tests/columns/test_counter_column.py +++ b/cqlengine/tests/columns/test_counter_column.py @@ -59,6 +59,15 @@ def test_concurrent_updates(self): def test_update_from_none(self): """ Tests that updating from None uses a create statement """ + instance = TestCounterModel() + instance.counter += 1 + instance.save() + + new = TestCounterModel.get(partition=instance.partition) + assert new.counter == 1 + + def test_new_instance_defaults_to_zero(self): + """ Tests that instantiating a new model instance will set the counter column to zero """ + instance = TestCounterModel() + assert instance.counter == 0 - def test_multiple_inserts(self): - """ Tests inserting over existing data works as expected 
""" From a13d6cf88ba099f615555f5c085aabc8f10a283d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 14 Aug 2013 15:56:23 -0700 Subject: [PATCH 0338/3726] adding tests and checks to model definition constraints --- cqlengine/models.py | 4 ++++ .../tests/columns/test_counter_column.py | 24 +++++++++++++++++-- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 640e023bd5..d03a00415d 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -334,6 +334,10 @@ def _transform_column(col_name, col_obj): #TODO: check that the defined columns don't conflict with any of the Model API's existing attributes/methods #transform column definitions for k,v in column_definitions: + # counter column primary keys are not allowed + if (v.primary_key or v.partition_key) and isinstance(v, (columns.Counter, columns.BaseContainerColumn)): + raise ModelDefinitionException('counter columns and container columns cannot be used as primary keys') + # this will mark the first primary key column as a partition # key, if one hasn't been set already if not has_partition_keys and v.primary_key: diff --git a/cqlengine/tests/columns/test_counter_column.py b/cqlengine/tests/columns/test_counter_column.py index f8dc0096e1..c260f38ed0 100644 --- a/cqlengine/tests/columns/test_counter_column.py +++ b/cqlengine/tests/columns/test_counter_column.py @@ -1,8 +1,9 @@ from uuid import uuid4 -from cqlengine import Model, ValidationError +from cqlengine import Model from cqlengine import columns from cqlengine.management import create_table, delete_table +from cqlengine.models import ModelDefinitionException from cqlengine.tests.base import BaseCassEngTestCase @@ -15,10 +16,29 @@ class TestCounterModel(Model): class TestClassConstruction(BaseCassEngTestCase): def test_defining_a_non_counter_column_fails(self): - """ Tests that defining a non counter column field fails """ + """ Tests that defining a non counter column field in a model with a 
counter column fails """ + with self.assertRaises(ModelDefinitionException): + class model(Model): + partition = columns.UUID(primary_key=True, default=uuid4) + counter = columns.Counter() + text = columns.Text() + def test_defining_a_primary_key_counter_column_fails(self): """ Tests that defining primary keys on counter columns fails """ + with self.assertRaises(TypeError): + class model(Model): + partition = columns.UUID(primary_key=True, default=uuid4) + cluster = columns.Counter(primary_ley=True) + counter = columns.Counter() + + # force it + with self.assertRaises(ModelDefinitionException): + class model(Model): + partition = columns.UUID(primary_key=True, default=uuid4) + cluster = columns.Counter() + cluster.primary_key = True + counter = columns.Counter() class TestCounterColumn(BaseCassEngTestCase): From 7a3b6a1bbabfacf77e7e09c6ab4da640d36d3d36 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 14 Aug 2013 18:07:30 -0700 Subject: [PATCH 0339/3726] renamed create_table to sync_table (added an alias), getting compaction started --- cqlengine/__init__.py | 3 +++ cqlengine/management.py | 4 +++- cqlengine/models.py | 2 ++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 27ef399572..639b56b0bd 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -8,4 +8,7 @@ __cqlengine_version_path__ = os.path.realpath(__file__ + '/../VERSION') __version__ = open(__cqlengine_version_path__, 'r').readline().strip() +# compaction +SizeTieredCompactionStrategy = "SizeTieredCompactionStrategy" +LeveledCompactionStrategy = "LeveledCompactionStrategy" diff --git a/cqlengine/management.py b/cqlengine/management.py index a06a10fb97..740f78e335 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -47,8 +47,10 @@ def delete_keyspace(name): if name in [r[0] for r in keyspaces]: execute("DROP KEYSPACE {}".format(name)) - def create_table(model, create_missing_keyspace=True): + sync_table(model, 
create_missing_keyspace) + +def sync_table(model, create_missing_keyspace=True): if model.__abstract__: raise CQLEngineException("cannot create table from abstract model") diff --git a/cqlengine/models.py b/cqlengine/models.py index d03a00415d..7eb88e1728 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -140,6 +140,8 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass #the keyspace for this model __keyspace__ = None + + __compaction__ = None __read_repair_chance__ = 0.1 def __init__(self, **values): From 23afb880d976cfb47c4b04d2a6436b5f068ef734 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 15 Aug 2013 13:20:22 -0700 Subject: [PATCH 0340/3726] test classes for different scenarios in place, working to test failure cases --- cqlengine/management.py | 32 +++++++++++ cqlengine/models.py | 16 ++++++ cqlengine/tests/management/test_management.py | 57 ++++++++++++++++++- 3 files changed, 103 insertions(+), 2 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 740f78e335..d2ecbca9b9 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -1,4 +1,5 @@ import json +from cqlengine import SizeTieredCompactionStrategy from cqlengine.connection import connection_manager, execute from cqlengine.exceptions import CQLEngineException @@ -99,6 +100,7 @@ def add_column(col): if _order: with_qs.append('clustering order by ({})'.format(', '.join(_order))) + # add read_repair_chance qs += ['WITH {}'.format(' AND '.join(with_qs))] qs = ' '.join(qs) @@ -149,6 +151,36 @@ def add_column(col): # index already exists pass +def get_compaction_options(model): + """ + Generates dictionary (later converted to a string) for creating and altering + tables with compaction strategy + + :param model: + :return: + """ + if not model.__compaction__: + return None + + result = {'class':model.__compaction__} + + def setter(key, limited_to_strategy = None): + mkey = "__compaction_{}__".format(key) + tmp = getattr(model, mkey) + if 
tmp and limited_to_strategy and limited_to_strategy != model.__compaction__: + raise CQLEngineException("{} is limited to {}".format(key, limited_to_strategy)) + + if tmp: + result[key] = tmp + + setter('min_threshold') + setter('tombstone_compaction_interval') + setter('bucket_high', SizeTieredCompactionStrategy) + setter('bucket_low', SizeTieredCompactionStrategy) + + return result + + def get_fields(model): # returns all fields that aren't part of the PK ks_name = model._get_keyspace() diff --git a/cqlengine/models.py b/cqlengine/models.py index 7eb88e1728..3abf1abaf1 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -141,7 +141,23 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass __keyspace__ = None + # compaction options __compaction__ = None + __compaction_tombstone_compaction_interval__ = None + __compaction_tombstone_threshold = None + + # compaction - size tiered options + __compaction_bucket_high__ = None + __compaction_bucket_low__ = None + __compaction_max_threshold__ = None + __compaction_min_threshold__ = None + __compaction_min_sstable_size__ = None + + # compaction - leveled options + __compaction_sstable_size_in_mb__ = None # only works with Leveled + + # end compaction + __read_repair_chance__ = 0.1 def __init__(self, **values): diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 9df6a31507..02973c0485 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -1,11 +1,11 @@ from cqlengine.exceptions import CQLEngineException -from cqlengine.management import create_table, delete_table, get_fields +from cqlengine.management import create_table, delete_table, get_fields, get_compaction_options from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.connection import ConnectionPool, Host from mock import MagicMock, patch -from cqlengine import management +from cqlengine import management, 
SizeTieredCompactionStrategy, LeveledCompactionStrategy from cqlengine.tests.query.test_queryset import TestModel from cqlengine.models import Model from cqlengine import columns @@ -141,3 +141,56 @@ def test_add_column(self): fields = get_fields(FirstModel) self.assertEqual(len(fields), 4) +class CompactionModel(Model): + __compaction__ = None + cid = columns.UUID(primary_key=True) + name = columns.Text() + +class CompactionSizeTieredModel(Model): + __compaction__ = SizeTieredCompactionStrategy + cid = columns.UUID(primary_key=True) + name = columns.Text() + +class CompactionLeveledStrategyModel(Model): + __compaction__ = LeveledCompactionStrategy + cid = columns.UUID(primary_key=True) + name = columns.Text() + +import copy + +class EmptyCompactionTest(BaseCassEngTestCase): + def test_empty_compaction(self): + self.model = copy.deepcopy(CompactionModel) + result = get_compaction_options(self.model) + self.assertIsNone(result) + + +class SizeTieredCompactionTest(BaseCassEngTestCase): + + def setUp(self): + self.model = copy.deepcopy(CompactionModel) + self.model.__compaction__ = SizeTieredCompactionStrategy + + def test_size_tiered(self): + result = get_compaction_options(self.model) + assert result['class'] == SizeTieredCompactionStrategy + + def test_min_threshold(self): + self.model.__compaction_min_threshold__ = 2 + + result = get_compaction_options(self.model) + assert result['min_threshold'] == 2 + +class LeveledCompactionTest(BaseCassEngTestCase): + def setUp(self): + self.model = copy.deepcopy(CompactionLeveledStrategyModel) + + def test_simple_leveled(self): + result = get_compaction_options(self.model) + assert result['class'] == LeveledCompactionStrategy + + def test_bucket_high_fails(self): + with patch.object(self.model, '__compaction_bucket_high__', 10), \ + self.assertRaises(CQLEngineException): + result = get_compaction_options(self.model) + From 15f6e613c382f72b8e9333b21d10ca0154a27260 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 15 Aug 
2013 13:27:53 -0700 Subject: [PATCH 0341/3726] more size tiered options --- cqlengine/management.py | 4 ++++ cqlengine/tests/management/test_management.py | 24 ++++++++++++++++--- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index d2ecbca9b9..f6b5f176ef 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -175,8 +175,12 @@ def setter(key, limited_to_strategy = None): setter('min_threshold') setter('tombstone_compaction_interval') + setter('bucket_high', SizeTieredCompactionStrategy) setter('bucket_low', SizeTieredCompactionStrategy) + setter('max_threshold', SizeTieredCompactionStrategy) + setter('min_threshold', SizeTieredCompactionStrategy) + setter('min_sstable_size', SizeTieredCompactionStrategy) return result diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 02973c0485..5caa337a52 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -185,12 +185,30 @@ class LeveledCompactionTest(BaseCassEngTestCase): def setUp(self): self.model = copy.deepcopy(CompactionLeveledStrategyModel) + def assert_option_fails(self, key): + # key is a normal_key, converted to + # __compaction_key__ + key = "__compaction_{}__".format(key) + + with patch.object(self.model, key, 10), \ + self.assertRaises(CQLEngineException): + get_compaction_options(self.model) + def test_simple_leveled(self): result = get_compaction_options(self.model) assert result['class'] == LeveledCompactionStrategy def test_bucket_high_fails(self): - with patch.object(self.model, '__compaction_bucket_high__', 10), \ - self.assertRaises(CQLEngineException): - result = get_compaction_options(self.model) + self.assert_option_fails('bucket_high') + + def test_bucket_low_fails(self): + self.assert_option_fails('bucket_low') + + def test_max_threshold_fails(self): + self.assert_option_fails('max_threshold') + + def 
test_min_threshold_fails(self): + self.assert_option_fails('min_threshold') + def test_min_sstable_size_fails(self): + self.assert_option_fails('min_sstable_size') From 8c38d7a6eb08ba76e7d014e33aaffbda99484634 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 15 Aug 2013 14:02:48 -0700 Subject: [PATCH 0342/3726] finishing out available options --- cqlengine/management.py | 4 ++- cqlengine/tests/management/test_management.py | 25 ++++++++++--------- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index f6b5f176ef..b0c24b829c 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -1,5 +1,5 @@ import json -from cqlengine import SizeTieredCompactionStrategy +from cqlengine import SizeTieredCompactionStrategy, LeveledCompactionStrategy from cqlengine.connection import connection_manager, execute from cqlengine.exceptions import CQLEngineException @@ -182,6 +182,8 @@ def setter(key, limited_to_strategy = None): setter('min_threshold', SizeTieredCompactionStrategy) setter('min_sstable_size', SizeTieredCompactionStrategy) + setter("sstable_size_in_mb", LeveledCompactionStrategy) + return result diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 5caa337a52..028fe9a5cb 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -164,8 +164,18 @@ def test_empty_compaction(self): result = get_compaction_options(self.model) self.assertIsNone(result) +class BaseCompactionTest(BaseCassEngTestCase): + def assert_option_fails(self, key): + # key is a normal_key, converted to + # __compaction_key__ + + key = "__compaction_{}__".format(key) -class SizeTieredCompactionTest(BaseCassEngTestCase): + with patch.object(self.model, key, 10), \ + self.assertRaises(CQLEngineException): + get_compaction_options(self.model) + +class SizeTieredCompactionTest(BaseCompactionTest): def setUp(self): 
self.model = copy.deepcopy(CompactionModel) @@ -177,23 +187,13 @@ def test_size_tiered(self): def test_min_threshold(self): self.model.__compaction_min_threshold__ = 2 - result = get_compaction_options(self.model) assert result['min_threshold'] == 2 -class LeveledCompactionTest(BaseCassEngTestCase): +class LeveledCompactionTest(BaseCompactionTest): def setUp(self): self.model = copy.deepcopy(CompactionLeveledStrategyModel) - def assert_option_fails(self, key): - # key is a normal_key, converted to - # __compaction_key__ - key = "__compaction_{}__".format(key) - - with patch.object(self.model, key, 10), \ - self.assertRaises(CQLEngineException): - get_compaction_options(self.model) - def test_simple_leveled(self): result = get_compaction_options(self.model) assert result['class'] == LeveledCompactionStrategy @@ -212,3 +212,4 @@ def test_min_threshold_fails(self): def test_min_sstable_size_fails(self): self.assert_option_fails('min_sstable_size') + From 95f851863b409cdc2c3185ce339ccaf8dc5cd667 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 15 Aug 2013 14:05:05 -0700 Subject: [PATCH 0343/3726] check for sstable size in MB ok in leveled compaction --- cqlengine/tests/management/test_management.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 028fe9a5cb..d308bb5038 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -213,3 +213,7 @@ def test_min_threshold_fails(self): def test_min_sstable_size_fails(self): self.assert_option_fails('min_sstable_size') + def test_sstable_size_in_mb(self): + with patch.object(self.model, '__compaction_sstable_size_in_mb__', 32): + result = get_compaction_options(self.model) + assert result['sstable_size_in_mb'] == 32 From a9de7843cfd788efc8f4736413ec492134d03e29 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 15 Aug 2013 14:49:16 -0700 Subject: [PATCH 0344/3726] 
breaking apart sync table --- cqlengine/management.py | 64 ++++++++++++++++++++++------------------- 1 file changed, 35 insertions(+), 29 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index b0c24b829c..306d330cf3 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -75,35 +75,7 @@ def sync_table(model, create_missing_keyspace=True): #check for an existing column family #TODO: check system tables instead of using cql thrifteries if raw_cf_name not in tables: - qs = ['CREATE TABLE {}'.format(cf_name)] - - #add column types - pkeys = [] # primary keys - ckeys = [] # clustering keys - qtypes = [] # field types - def add_column(col): - s = col.get_column_def() - if col.primary_key: - keys = (pkeys if col.partition_key else ckeys) - keys.append('"{}"'.format(col.db_field_name)) - qtypes.append(s) - for name, col in model._columns.items(): - add_column(col) - - qtypes.append('PRIMARY KEY (({}){})'.format(', '.join(pkeys), ckeys and ', ' + ', '.join(ckeys) or '')) - - qs += ['({})'.format(', '.join(qtypes))] - - with_qs = ['read_repair_chance = {}'.format(model.__read_repair_chance__)] - - _order = ['"{}" {}'.format(c.db_field_name, c.clustering_order or 'ASC') for c in model._clustering_keys.values()] - if _order: - with_qs.append('clustering order by ({})'.format(', '.join(_order))) - - - # add read_repair_chance - qs += ['WITH {}'.format(' AND '.join(with_qs))] - qs = ' '.join(qs) + qs = get_create_table(model) try: execute(qs) @@ -151,6 +123,40 @@ def add_column(col): # index already exists pass +def get_create_table(model): + cf_name = model.column_family_name() + qs = ['CREATE TABLE {}'.format(cf_name)] + + #add column types + pkeys = [] # primary keys + ckeys = [] # clustering keys + qtypes = [] # field types + def add_column(col): + s = col.get_column_def() + if col.primary_key: + keys = (pkeys if col.partition_key else ckeys) + keys.append('"{}"'.format(col.db_field_name)) + qtypes.append(s) + for name, col in 
model._columns.items(): + add_column(col) + + qtypes.append('PRIMARY KEY (({}){})'.format(', '.join(pkeys), ckeys and ', ' + ', '.join(ckeys) or '')) + + qs += ['({})'.format(', '.join(qtypes))] + + with_qs = ['read_repair_chance = {}'.format(model.__read_repair_chance__)] + + _order = ['"{}" {}'.format(c.db_field_name, c.clustering_order or 'ASC') for c in model._clustering_keys.values()] + if _order: + with_qs.append('clustering order by ({})'.format(', '.join(_order))) + + + # add read_repair_chance + qs += ['WITH {}'.format(' AND '.join(with_qs))] + qs = ' '.join(qs) + return qs + + def get_compaction_options(model): """ Generates dictionary (later converted to a string) for creating and altering From 9ea1c7859e507d5767e33297b2bab31c4bc2fbfd Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 15 Aug 2013 15:47:52 -0700 Subject: [PATCH 0345/3726] options added to create string --- cqlengine/management.py | 8 ++++++++ cqlengine/tests/management/test_management.py | 6 +++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 306d330cf3..5c9350a326 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -147,12 +147,20 @@ def add_column(col): with_qs = ['read_repair_chance = {}'.format(model.__read_repair_chance__)] _order = ['"{}" {}'.format(c.db_field_name, c.clustering_order or 'ASC') for c in model._clustering_keys.values()] + if _order: with_qs.append('clustering order by ({})'.format(', '.join(_order))) + compaction_options = get_compaction_options(model) + + if compaction_options: + compaction_options = json.dumps(compaction_options).replace('"', "'") + with_qs.append("compaction = {}".format(compaction_options)) # add read_repair_chance qs += ['WITH {}'.format(' AND '.join(with_qs))] + + qs = ' '.join(qs) return qs diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index d308bb5038..ab27b132cc 100644 --- 
a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -1,5 +1,5 @@ from cqlengine.exceptions import CQLEngineException -from cqlengine.management import create_table, delete_table, get_fields, get_compaction_options +from cqlengine.management import create_table, delete_table, get_fields, get_compaction_options, get_create_table from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.connection import ConnectionPool, Host @@ -216,4 +216,8 @@ def test_min_sstable_size_fails(self): def test_sstable_size_in_mb(self): with patch.object(self.model, '__compaction_sstable_size_in_mb__', 32): result = get_compaction_options(self.model) + assert result['sstable_size_in_mb'] == 32 + + + From f7b9953e0a035bf7f21222c35f6d02f61c01a84b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 15 Aug 2013 16:11:18 -0700 Subject: [PATCH 0346/3726] test for leveled compaction working --- changelog | 9 +++++++++ cqlengine/management.py | 6 ++++++ cqlengine/tests/management/test_management.py | 11 ++++++++++- 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/changelog b/changelog index 0da8f3c7e9..2ed482986a 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,14 @@ CHANGELOG +0.7.0 +* added counter columns +* added support for compaction settings at the model level +* deprecated delete_table in favor of drop_table +* deprecated create_table in favor of sync_table + +0.6.0 +* added table sync + 0.5.2 * adding hex conversion to Bytes column diff --git a/cqlengine/management.py b/cqlengine/management.py index 5c9350a326..eae327a6ae 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -1,4 +1,5 @@ import json +import warnings from cqlengine import SizeTieredCompactionStrategy, LeveledCompactionStrategy from cqlengine.connection import connection_manager, execute @@ -49,6 +50,7 @@ def delete_keyspace(name): execute("DROP KEYSPACE {}".format(name)) def create_table(model, create_missing_keyspace=True): + 
warnings.warn("create_table has been deprecated in favor of sync_table and will be removed in a future release", DeprecationWarning) sync_table(model, create_missing_keyspace) def sync_table(model, create_missing_keyspace=True): @@ -217,6 +219,10 @@ def get_fields(model): # convert to Field named tuples def delete_table(model): + warnings.warn("delete_table has been deprecated in favor of drop_table()", DeprecationWarning) + return drop_table(model) + +def drop_table(model): # don't try to delete non existant tables ks_name = model._get_keyspace() diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index ab27b132cc..a89c0b4084 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -1,5 +1,5 @@ from cqlengine.exceptions import CQLEngineException -from cqlengine.management import create_table, delete_table, get_fields, get_compaction_options, get_create_table +from cqlengine.management import create_table, delete_table, get_fields, get_compaction_options, get_create_table, sync_table, drop_table from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.connection import ConnectionPool, Host @@ -219,5 +219,14 @@ def test_sstable_size_in_mb(self): assert result['sstable_size_in_mb'] == 32 + def test_create_table(self): + class LeveledcompactionTestTable(Model): + __compaction__ = LeveledCompactionStrategy + __compaction_sstable_size_in_mb__ = 64 + user_id = columns.UUID(primary_key=True) + name = columns.Text() + + drop_table(LeveledcompactionTestTable) + sync_table(LeveledcompactionTestTable) From 6148b9edbf0c77d4d77e75d6654033d356e2d6a8 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 15 Aug 2013 16:44:31 -0700 Subject: [PATCH 0347/3726] working on alters --- cqlengine/management.py | 15 ++++++++++++++- cqlengine/tests/management/test_management.py | 5 +++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git 
a/cqlengine/management.py b/cqlengine/management.py index eae327a6ae..ddcbdaa213 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -75,7 +75,6 @@ def sync_table(model, create_missing_keyspace=True): tables = [x[0] for x in tables.results] #check for an existing column family - #TODO: check system tables instead of using cql thrifteries if raw_cf_name not in tables: qs = get_create_table(model) @@ -218,6 +217,20 @@ def get_fields(model): return [Field(x[0], x[1]) for x in tmp.results] # convert to Field named tuples +def get_compaction_settings(model): + # returns a dictionary of compaction settings in an existing table + ks_name = model._get_keyspace() + col_family = model.column_family_name(include_keyspace=False) + with connection_manager() as con: + query = "SELECT , validator FROM system.schema_columns \ + WHERE keyspace_name = :ks_name AND columnfamily_name = :col_family" + + logger.debug("get_fields %s %s", ks_name, col_family) + + tmp = con.execute(query, {'ks_name':ks_name, 'col_family':col_family}) + import ipdb; ipdb.set_trace() + + def delete_table(model): warnings.warn("delete_table has been deprecated in favor of drop_table()", DeprecationWarning) return drop_table(model) diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index a89c0b4084..6f1a833749 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -229,4 +229,9 @@ class LeveledcompactionTestTable(Model): drop_table(LeveledcompactionTestTable) sync_table(LeveledcompactionTestTable) + LeveledcompactionTestTable.__compaction__ = SizeTieredCompactionStrategy + LeveledcompactionTestTable.__compaction_sstable_size_in_mb__ = None + + sync_table(LeveledcompactionTestTable) + From a78e4ee54ffb592c92c938b4d82297ca5e2288bb Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 15 Aug 2013 18:54:56 -0700 Subject: [PATCH 0348/3726] moved compaction settings to it's own test 
module --- cqlengine/management.py | 23 ++-- .../management/test_compaction_settings.py | 104 ++++++++++++++++++ cqlengine/tests/management/test_management.py | 102 +---------------- 3 files changed, 123 insertions(+), 106 deletions(-) create mode 100644 cqlengine/tests/management/test_compaction_settings.py diff --git a/cqlengine/management.py b/cqlengine/management.py index ddcbdaa213..0267a719ca 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -1,6 +1,7 @@ import json import warnings from cqlengine import SizeTieredCompactionStrategy, LeveledCompactionStrategy +from cqlengine.named import NamedTable from cqlengine.connection import connection_manager, execute from cqlengine.exceptions import CQLEngineException @@ -12,6 +13,10 @@ logger = logging.getLogger(__name__) +# system keyspaces +schema_columnfamilies = NamedTable('system', 'schema_columnfamilies') + + def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, durable_writes=True, **replication_values): """ creates a keyspace @@ -98,6 +103,10 @@ def sync_table(model, create_missing_keyspace=True): logger.debug(query) execute(query) + update_compaction(model) + # update compaction + + #get existing index names, skip ones that already exist with connection_manager() as con: @@ -217,18 +226,16 @@ def get_fields(model): return [Field(x[0], x[1]) for x in tmp.results] # convert to Field named tuples -def get_compaction_settings(model): - # returns a dictionary of compaction settings in an existing table + +def update_compaction(model): ks_name = model._get_keyspace() col_family = model.column_family_name(include_keyspace=False) - with connection_manager() as con: - query = "SELECT , validator FROM system.schema_columns \ - WHERE keyspace_name = :ks_name AND columnfamily_name = :col_family" - logger.debug("get_fields %s %s", ks_name, col_family) + row = schema_columnfamilies.get(keyspace_name=ks_name, + columnfamily_name=col_family) + # check compaction_strategy_class + 
# check compaction_strategy_options - tmp = con.execute(query, {'ks_name':ks_name, 'col_family':col_family}) - import ipdb; ipdb.set_trace() def delete_table(model): diff --git a/cqlengine/tests/management/test_compaction_settings.py b/cqlengine/tests/management/test_compaction_settings.py new file mode 100644 index 0000000000..6242be654c --- /dev/null +++ b/cqlengine/tests/management/test_compaction_settings.py @@ -0,0 +1,104 @@ +import copy +from mock import patch +from cqlengine import Model, columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy +from cqlengine.exceptions import CQLEngineException +from cqlengine.management import get_compaction_options, drop_table, sync_table +from cqlengine.tests.base import BaseCassEngTestCase + + +class CompactionModel(Model): + __compaction__ = None + cid = columns.UUID(primary_key=True) + name = columns.Text() + + +class BaseCompactionTest(BaseCassEngTestCase): + def assert_option_fails(self, key): + # key is a normal_key, converted to + # __compaction_key__ + + key = "__compaction_{}__".format(key) + + with patch.object(self.model, key, 10), \ + self.assertRaises(CQLEngineException): + get_compaction_options(self.model) + + +class SizeTieredCompactionTest(BaseCompactionTest): + + def setUp(self): + self.model = copy.deepcopy(CompactionModel) + self.model.__compaction__ = SizeTieredCompactionStrategy + + def test_size_tiered(self): + result = get_compaction_options(self.model) + assert result['class'] == SizeTieredCompactionStrategy + + def test_min_threshold(self): + self.model.__compaction_min_threshold__ = 2 + result = get_compaction_options(self.model) + assert result['min_threshold'] == 2 + + +class LeveledCompactionTest(BaseCompactionTest): + def setUp(self): + self.model = copy.deepcopy(CompactionLeveledStrategyModel) + + def test_simple_leveled(self): + result = get_compaction_options(self.model) + assert result['class'] == LeveledCompactionStrategy + + def test_bucket_high_fails(self): + 
self.assert_option_fails('bucket_high') + + def test_bucket_low_fails(self): + self.assert_option_fails('bucket_low') + + def test_max_threshold_fails(self): + self.assert_option_fails('max_threshold') + + def test_min_threshold_fails(self): + self.assert_option_fails('min_threshold') + + def test_min_sstable_size_fails(self): + self.assert_option_fails('min_sstable_size') + + def test_sstable_size_in_mb(self): + with patch.object(self.model, '__compaction_sstable_size_in_mb__', 32): + result = get_compaction_options(self.model) + + assert result['sstable_size_in_mb'] == 32 + + def test_create_table(self): + class LeveledcompactionTestTable(Model): + __compaction__ = LeveledCompactionStrategy + __compaction_sstable_size_in_mb__ = 64 + user_id = columns.UUID(primary_key=True) + name = columns.Text() + + drop_table(LeveledcompactionTestTable) + sync_table(LeveledcompactionTestTable) + + LeveledcompactionTestTable.__compaction__ = SizeTieredCompactionStrategy + LeveledcompactionTestTable.__compaction_sstable_size_in_mb__ = None + + sync_table(LeveledcompactionTestTable) + + +class EmptyCompactionTest(BaseCassEngTestCase): + def test_empty_compaction(self): + self.model = copy.deepcopy(CompactionModel) + result = get_compaction_options(self.model) + self.assertIsNone(result) + + +class CompactionLeveledStrategyModel(Model): + __compaction__ = LeveledCompactionStrategy + cid = columns.UUID(primary_key=True) + name = columns.Text() + + +class CompactionSizeTieredModel(Model): + __compaction__ = SizeTieredCompactionStrategy + cid = columns.UUID(primary_key=True) + name = columns.Text() diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 6f1a833749..88405b1e40 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -1,11 +1,10 @@ +from mock import MagicMock, patch + from cqlengine.exceptions import CQLEngineException -from cqlengine.management import 
create_table, delete_table, get_fields, get_compaction_options, get_create_table, sync_table, drop_table +from cqlengine.management import create_table, delete_table, get_fields from cqlengine.tests.base import BaseCassEngTestCase - from cqlengine.connection import ConnectionPool, Host - -from mock import MagicMock, patch -from cqlengine import management, SizeTieredCompactionStrategy, LeveledCompactionStrategy +from cqlengine import management from cqlengine.tests.query.test_queryset import TestModel from cqlengine.models import Model from cqlengine import columns @@ -141,97 +140,4 @@ def test_add_column(self): fields = get_fields(FirstModel) self.assertEqual(len(fields), 4) -class CompactionModel(Model): - __compaction__ = None - cid = columns.UUID(primary_key=True) - name = columns.Text() - -class CompactionSizeTieredModel(Model): - __compaction__ = SizeTieredCompactionStrategy - cid = columns.UUID(primary_key=True) - name = columns.Text() - -class CompactionLeveledStrategyModel(Model): - __compaction__ = LeveledCompactionStrategy - cid = columns.UUID(primary_key=True) - name = columns.Text() - -import copy - -class EmptyCompactionTest(BaseCassEngTestCase): - def test_empty_compaction(self): - self.model = copy.deepcopy(CompactionModel) - result = get_compaction_options(self.model) - self.assertIsNone(result) - -class BaseCompactionTest(BaseCassEngTestCase): - def assert_option_fails(self, key): - # key is a normal_key, converted to - # __compaction_key__ - - key = "__compaction_{}__".format(key) - - with patch.object(self.model, key, 10), \ - self.assertRaises(CQLEngineException): - get_compaction_options(self.model) - -class SizeTieredCompactionTest(BaseCompactionTest): - - def setUp(self): - self.model = copy.deepcopy(CompactionModel) - self.model.__compaction__ = SizeTieredCompactionStrategy - - def test_size_tiered(self): - result = get_compaction_options(self.model) - assert result['class'] == SizeTieredCompactionStrategy - - def test_min_threshold(self): 
- self.model.__compaction_min_threshold__ = 2 - result = get_compaction_options(self.model) - assert result['min_threshold'] == 2 - -class LeveledCompactionTest(BaseCompactionTest): - def setUp(self): - self.model = copy.deepcopy(CompactionLeveledStrategyModel) - - def test_simple_leveled(self): - result = get_compaction_options(self.model) - assert result['class'] == LeveledCompactionStrategy - - def test_bucket_high_fails(self): - self.assert_option_fails('bucket_high') - - def test_bucket_low_fails(self): - self.assert_option_fails('bucket_low') - - def test_max_threshold_fails(self): - self.assert_option_fails('max_threshold') - - def test_min_threshold_fails(self): - self.assert_option_fails('min_threshold') - - def test_min_sstable_size_fails(self): - self.assert_option_fails('min_sstable_size') - - def test_sstable_size_in_mb(self): - with patch.object(self.model, '__compaction_sstable_size_in_mb__', 32): - result = get_compaction_options(self.model) - - assert result['sstable_size_in_mb'] == 32 - - def test_create_table(self): - class LeveledcompactionTestTable(Model): - __compaction__ = LeveledCompactionStrategy - __compaction_sstable_size_in_mb__ = 64 - user_id = columns.UUID(primary_key=True) - name = columns.Text() - - drop_table(LeveledcompactionTestTable) - sync_table(LeveledcompactionTestTable) - - LeveledcompactionTestTable.__compaction__ = SizeTieredCompactionStrategy - LeveledcompactionTestTable.__compaction_sstable_size_in_mb__ = None - - sync_table(LeveledcompactionTestTable) - From f3c6d32d08b5b394604ba0bbd83f30d8c6faa93d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 16 Aug 2013 08:29:02 -0700 Subject: [PATCH 0349/3726] fixing tablename and keyspace name class args --- docs/topics/models.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 3360ba7fee..c030c76445 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -138,11 +138,11 @@ Model 
Attributes *Optional.* Indicates that this model is only intended to be used as a base class for other models. You can't create tables for abstract models, but checks around schema validity are skipped during class construction. - .. attribute:: Model.table_name + .. attribute:: Model.__table_name__ *Optional.* Sets the name of the CQL table for this model. If left blank, the table name will be the name of the model, with it's module name as it's prefix. Manually defined table names are not inherited. - .. attribute:: Model.keyspace + .. attribute:: Model.__keyspace__ *Optional.* Sets the name of the keyspace used by this model. Defaulst to cqlengine From f33627c9791a53aa5efeb9a330d761fc8ae2e7eb Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 16 Aug 2013 11:26:06 -0700 Subject: [PATCH 0350/3726] making sure update table is called --- cqlengine/management.py | 5 ++-- .../management/test_compaction_settings.py | 27 ++++++++++++------- 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 0267a719ca..820d8236e2 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -103,9 +103,7 @@ def sync_table(model, create_missing_keyspace=True): logger.debug(query) execute(query) - update_compaction(model) - # update compaction - + update_compaction(model) #get existing index names, skip ones that already exist @@ -228,6 +226,7 @@ def get_fields(model): def update_compaction(model): + logger.debug("Checking %s for compaction differences", model) ks_name = model._get_keyspace() col_family = model.column_family_name(include_keyspace=False) diff --git a/cqlengine/tests/management/test_compaction_settings.py b/cqlengine/tests/management/test_compaction_settings.py index 6242be654c..4ef3cc170c 100644 --- a/cqlengine/tests/management/test_compaction_settings.py +++ b/cqlengine/tests/management/test_compaction_settings.py @@ -1,5 +1,6 @@ import copy -from mock import patch +from time import sleep +from 
mock import patch, MagicMock from cqlengine import Model, columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy from cqlengine.exceptions import CQLEngineException from cqlengine.management import get_compaction_options, drop_table, sync_table @@ -69,20 +70,26 @@ def test_sstable_size_in_mb(self): assert result['sstable_size_in_mb'] == 32 - def test_create_table(self): - class LeveledcompactionTestTable(Model): - __compaction__ = LeveledCompactionStrategy - __compaction_sstable_size_in_mb__ = 64 - user_id = columns.UUID(primary_key=True) - name = columns.Text() +class LeveledcompactionTestTable(Model): + __compaction__ = LeveledCompactionStrategy + __compaction_sstable_size_in_mb__ = 64 + + user_id = columns.UUID(primary_key=True) + name = columns.Text() + + +class AlterTableTest(BaseCassEngTestCase): + + def test_alter_is_called_table(self): drop_table(LeveledcompactionTestTable) sync_table(LeveledcompactionTestTable) + with patch('cqlengine.management.update_compaction') as mock: + mock.return_value = True + sync_table(LeveledcompactionTestTable) + assert mock.called == 1 - LeveledcompactionTestTable.__compaction__ = SizeTieredCompactionStrategy - LeveledcompactionTestTable.__compaction_sstable_size_in_mb__ = None - sync_table(LeveledcompactionTestTable) class EmptyCompactionTest(BaseCassEngTestCase): From edf0dc06b7b18df61ad3a778c7e059eaceefc1b1 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 16 Aug 2013 11:26:58 -0700 Subject: [PATCH 0351/3726] removed unnecessary return value --- cqlengine/tests/management/test_compaction_settings.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cqlengine/tests/management/test_compaction_settings.py b/cqlengine/tests/management/test_compaction_settings.py index 4ef3cc170c..cc568201f0 100644 --- a/cqlengine/tests/management/test_compaction_settings.py +++ b/cqlengine/tests/management/test_compaction_settings.py @@ -85,7 +85,6 @@ def test_alter_is_called_table(self): drop_table(LeveledcompactionTestTable) 
sync_table(LeveledcompactionTestTable) with patch('cqlengine.management.update_compaction') as mock: - mock.return_value = True sync_table(LeveledcompactionTestTable) assert mock.called == 1 From d43bf1c9559420a7ca04511764ba98692c4166fa Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 16 Aug 2013 12:07:29 -0700 Subject: [PATCH 0352/3726] working on updating compaction strategy --- cqlengine/management.py | 8 +++++++- .../tests/management/test_compaction_settings.py | 13 +++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 820d8236e2..8fb447c2aa 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -187,6 +187,12 @@ def get_compaction_options(model): result = {'class':model.__compaction__} def setter(key, limited_to_strategy = None): + """ + sets key in result, checking if the key is limited to either SizeTiered or Leveled + :param key: one of the compaction options, like "bucket_high" + :param limited_to_strategy: SizeTieredCompactionStrategy, LeveledCompactionStrategy + :return: + """ mkey = "__compaction_{}__".format(key) tmp = getattr(model, mkey) if tmp and limited_to_strategy and limited_to_strategy != model.__compaction__: @@ -234,7 +240,7 @@ def update_compaction(model): columnfamily_name=col_family) # check compaction_strategy_class # check compaction_strategy_options - + return True def delete_table(model): diff --git a/cqlengine/tests/management/test_compaction_settings.py b/cqlengine/tests/management/test_compaction_settings.py index cc568201f0..d6d96ad53c 100644 --- a/cqlengine/tests/management/test_compaction_settings.py +++ b/cqlengine/tests/management/test_compaction_settings.py @@ -78,6 +78,7 @@ class LeveledcompactionTestTable(Model): user_id = columns.UUID(primary_key=True) name = columns.Text() +from cqlengine.management import schema_columnfamilies class AlterTableTest(BaseCassEngTestCase): @@ -88,6 +89,18 @@ def test_alter_is_called_table(self): 
sync_table(LeveledcompactionTestTable) assert mock.called == 1 + def test_alter_actually_alters(self): + tmp = copy.deepcopy(LeveledcompactionTestTable) + drop_table(tmp) + sync_table(tmp) + tmp.__compaction__ = SizeTieredCompactionStrategy + tmp.__compaction_sstable_size_in_mb__ = None + sync_table(tmp) + + table_settings = schema_columnfamilies.get(keyspace_name=tmp._get_keyspace(), + columnfamily_name=tmp.column_family_name(include_keyspace=False)) + self.assertRegexpMatches(table_settings['compaction_strategy_class'], '.*SizeTieredCompactionStrategy$') + From b5d69d0906b7080f1933df22f36fed75ccab6d4d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 16 Aug 2013 16:00:06 -0700 Subject: [PATCH 0353/3726] updating column option docs --- docs/topics/models.rst | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index c030c76445..86e0840d73 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -67,15 +67,19 @@ Column Types Column Options -------------- - Each column can be defined with optional arguments to modify the way they behave. While some column types may define additional column options, these are the options that are available on all columns: + Each column can be defined with optional arguments to modify the way they behave. While some column types may + define additional column options, these are the options that are available on all columns: :attr:`~cqlengine.columns.BaseColumn.primary_key` If True, this column is created as a primary key field. A model can have multiple primary keys. Defaults to False. - *In CQL, there are 2 types of primary keys: partition keys and clustering keys. 
As with CQL, the first primary key is the partition key, and all others are clustering keys, unless partition keys are specified manually using* :attr:`~cqlengine.columns.BaseColumn.partition_key` + *In CQL, there are 2 types of primary keys: partition keys and clustering keys. As with CQL, the first + primary key is the partition key, and all others are clustering keys, unless partition keys are specified + manually using* :attr:`~cqlengine.columns.BaseColumn.partition_key` :attr:`~cqlengine.columns.BaseColumn.partition_key` - If True, this column is created as partition primary key. There may be many partition keys defined, forming *composite partition key* + If True, this column is created as partition primary key. There may be many partition keys defined, + forming a *composite partition key* :attr:`~cqlengine.columns.BaseColumn.index` If True, an index will be created for this column. Defaults to False. @@ -83,13 +87,16 @@ Column Options *Note: Indexes can only be created on models with one primary key* :attr:`~cqlengine.columns.BaseColumn.db_field` - Explicitly sets the name of the column in the database table. If this is left blank, the column name will be the same as the name of the column attribute. Defaults to None. + Explicitly sets the name of the column in the database table. If this is left blank, the column name will be + the same as the name of the column attribute. Defaults to None. :attr:`~cqlengine.columns.BaseColumn.default` - The default value for this column. If a model instance is saved without a value for this column having been defined, the default value will be used. This can be either a value or a callable object (ie: datetime.now is a valid default argument). + The default value for this column. If a model instance is saved without a value for this column having been + defined, the default value will be used. This can be either a value or a callable object (ie: datetime.now is a valid default argument). 
+ Callable defaults will be called each time a default is assigned to a None value :attr:`~cqlengine.columns.BaseColumn.required` - If True, this model cannot be saved without a value defined for this column. Defaults to True. Primary key fields cannot have their required fields set to False. + If True, this model cannot be saved without a value defined for this column. Defaults to False. Primary key fields always require values. Model Methods ============= From b2eace8bd49825421d12291dfb441fa32542e1f2 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 19 Aug 2013 12:02:51 -0700 Subject: [PATCH 0354/3726] altering correctly on incorrect compaction strategy --- cqlengine/management.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 8fb447c2aa..3ac4dd1f04 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -239,8 +239,14 @@ def update_compaction(model): row = schema_columnfamilies.get(keyspace_name=ks_name, columnfamily_name=col_family) # check compaction_strategy_class + do_update = not row['compaction_strategy_class'].endswith(model.__compaction__) + # check compaction_strategy_options - return True + if do_update: + options = get_compaction_options(model) + options = json.dumps(options).replace('"', "'") + cf_name = model.column_family_name() + execute("ALTER TABLE {} with compaction = {}".format(cf_name, options)) def delete_table(model): From cb925d342cb8cd3db92f4fc2bf47c67c37e40ad0 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 19 Aug 2013 13:12:19 -0700 Subject: [PATCH 0355/3726] verifying alter table with compaction" --- cqlengine/management.py | 20 +++++++++++++++---- .../management/test_compaction_settings.py | 15 +++++++++++++- 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 3ac4dd1f04..8aeb55276c 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -182,7 +182,7 
@@ def get_compaction_options(model): :return: """ if not model.__compaction__: - return None + return {} result = {'class':model.__compaction__} @@ -239,12 +239,24 @@ def update_compaction(model): row = schema_columnfamilies.get(keyspace_name=ks_name, columnfamily_name=col_family) # check compaction_strategy_class - do_update = not row['compaction_strategy_class'].endswith(model.__compaction__) + if model.__compaction__: + do_update = not row['compaction_strategy_class'].endswith(model.__compaction__) + else: + do_update = False + + existing_options = row['compaction_strategy_options'] + existing_options = json.loads(existing_options) + + desired_options = get_compaction_options(model) + desired_options.pop('class', None) + + for k,v in desired_options.items(): + if existing_options[k] != v: + do_update = True # check compaction_strategy_options if do_update: - options = get_compaction_options(model) - options = json.dumps(options).replace('"', "'") + options = json.dumps(get_compaction_options(model)).replace('"', "'") cf_name = model.column_family_name() execute("ALTER TABLE {} with compaction = {}".format(cf_name, options)) diff --git a/cqlengine/tests/management/test_compaction_settings.py b/cqlengine/tests/management/test_compaction_settings.py index d6d96ad53c..4a958f901d 100644 --- a/cqlengine/tests/management/test_compaction_settings.py +++ b/cqlengine/tests/management/test_compaction_settings.py @@ -101,6 +101,19 @@ def test_alter_actually_alters(self): columnfamily_name=tmp.column_family_name(include_keyspace=False)) self.assertRegexpMatches(table_settings['compaction_strategy_class'], '.*SizeTieredCompactionStrategy$') + def test_alter_options(self): + + class AlterTable(Model): + __compaction__ = LeveledCompactionStrategy + __compaction_sstable_size_in_mb__ = 64 + + user_id = columns.UUID(primary_key=True) + name = columns.Text() + + drop_table(AlterTable) + sync_table(AlterTable) + AlterTable.__compaction_sstable_size_in_mb__ = 128 + 
sync_table(AlterTable) @@ -108,7 +121,7 @@ class EmptyCompactionTest(BaseCassEngTestCase): def test_empty_compaction(self): self.model = copy.deepcopy(CompactionModel) result = get_compaction_options(self.model) - self.assertIsNone(result) + self.assertEqual({}, result) class CompactionLeveledStrategyModel(Model): From 32b005edb1883d179ebc2bf9ef3c6e89760b1167 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 19 Aug 2013 13:45:32 -0700 Subject: [PATCH 0356/3726] fixed broken test due to weird copying issue --- cqlengine/tests/management/test_compaction_settings.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cqlengine/tests/management/test_compaction_settings.py b/cqlengine/tests/management/test_compaction_settings.py index 4a958f901d..07b72a8170 100644 --- a/cqlengine/tests/management/test_compaction_settings.py +++ b/cqlengine/tests/management/test_compaction_settings.py @@ -119,8 +119,12 @@ class AlterTable(Model): class EmptyCompactionTest(BaseCassEngTestCase): def test_empty_compaction(self): - self.model = copy.deepcopy(CompactionModel) - result = get_compaction_options(self.model) + class EmptyCompactionModel(Model): + __compaction__ = None + cid = columns.UUID(primary_key=True) + name = columns.Text() + + result = get_compaction_options(EmptyCompactionModel) self.assertEqual({}, result) From e7c83a0d69933d418b01c8202cc8c9e87b52f7c5 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 19 Aug 2013 15:49:26 -0700 Subject: [PATCH 0357/3726] adding support for using different queryset and dmlquery classes on a model --- cqlengine/models.py | 12 ++++-- .../tests/model/test_class_construction.py | 38 ++++++++++++++++++- 2 files changed, 46 insertions(+), 4 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index d03a00415d..2c011b8e62 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -7,10 +7,12 @@ from cqlengine.query import DoesNotExist as _DoesNotExist from cqlengine.query import 
MultipleObjectsReturned as _MultipleObjectsReturned + class ModelDefinitionException(ModelException): pass DEFAULT_KEYSPACE = 'cqlengine' + class hybrid_classmethod(object): """ Allows a method to behave as both a class method and @@ -44,7 +46,7 @@ def __get__(self, obj, model): """ :rtype: ModelQuerySet """ if model.__abstract__: raise CQLEngineException('cannot execute queries against abstract models') - return ModelQuerySet(model) + return model.__queryset__(model) def __call__(self, *args, **kwargs): """ @@ -140,6 +142,10 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass #the keyspace for this model __keyspace__ = None + # the queryset class used for this class + __queryset__ = ModelQuerySet + __dmlquery__ = DMLQuery + __read_repair_chance__ = 0.1 def __init__(self, **values): @@ -261,7 +267,7 @@ def get(cls, *args, **kwargs): def save(self): is_new = self.pk is None self.validate() - DMLQuery(self.__class__, self, batch=self._batch).save() + self.__dmlquery__(self.__class__, self, batch=self._batch).save() #reset the value managers for v in self._values.values(): @@ -272,7 +278,7 @@ def save(self): def delete(self): """ Deletes this instance """ - DMLQuery(self.__class__, self, batch=self._batch).delete() + self.__dmlquery__(self.__class__, self, batch=self._batch).delete() @classmethod def _class_batch(cls, batch): diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index f0344c6bd3..6653946bea 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -1,5 +1,5 @@ from uuid import uuid4 -from cqlengine.query import QueryException +from cqlengine.query import QueryException, ModelQuerySet, DMLQuery from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.exceptions import ModelException, CQLEngineException @@ -300,6 +300,42 @@ def test_concrete_class_table_creation_cycle(self): delete_table(ConcreteModelWithCol) 
+class TestCustomQuerySet(BaseCassEngTestCase): + """ Tests overriding the default queryset class """ + + class TestException(Exception): pass + + def test_overriding_queryset(self): + + class QSet(ModelQuerySet): + def create(iself, **kwargs): + raise self.TestException + + class CQModel(Model): + __queryset__ = QSet + part = columns.UUID(primary_key=True) + data = columns.Text() + + with self.assertRaises(self.TestException): + CQModel.create(part=uuid4(), data='s') + + def test_overriding_dmlqueryset(self): + + class DMLQ(DMLQuery): + def save(iself): + raise self.TestException + + class CDQModel(Model): + __dmlquery__ = DMLQ + part = columns.UUID(primary_key=True) + data = columns.Text() + + with self.assertRaises(self.TestException): + CDQModel().save() + + + + From 29c8561049ab6333684ce56c398cbe95b849d3dc Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 20 Aug 2013 11:43:41 -0700 Subject: [PATCH 0358/3726] updated docs with options, added tests for compaction options --- cqlengine/management.py | 32 ++++++----- .../management/test_compaction_settings.py | 54 +++++++++++++++++-- docs/topics/models.rst | 50 ++++++++++++++--- 3 files changed, 114 insertions(+), 22 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 8aeb55276c..e05c7fc265 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -201,7 +201,6 @@ def setter(key, limited_to_strategy = None): if tmp: result[key] = tmp - setter('min_threshold') setter('tombstone_compaction_interval') setter('bucket_high', SizeTieredCompactionStrategy) @@ -231,18 +230,19 @@ def get_fields(model): # convert to Field named tuples +def get_table_settings(model): + return schema_columnfamilies.get(keyspace_name=model._get_keyspace(), + columnfamily_name=model.column_family_name(include_keyspace=False)) + + def update_compaction(model): logger.debug("Checking %s for compaction differences", model) - ks_name = model._get_keyspace() - col_family = 
model.column_family_name(include_keyspace=False) - - row = schema_columnfamilies.get(keyspace_name=ks_name, - columnfamily_name=col_family) + row = get_table_settings(model) # check compaction_strategy_class - if model.__compaction__: - do_update = not row['compaction_strategy_class'].endswith(model.__compaction__) - else: - do_update = False + if not model.__compaction__: + return + + do_update = not row['compaction_strategy_class'].endswith(model.__compaction__) existing_options = row['compaction_strategy_options'] existing_options = json.loads(existing_options) @@ -251,14 +251,19 @@ def update_compaction(model): desired_options.pop('class', None) for k,v in desired_options.items(): - if existing_options[k] != v: + val = existing_options.pop(k, None) + if val != v: do_update = True # check compaction_strategy_options if do_update: - options = json.dumps(get_compaction_options(model)).replace('"', "'") + options = get_compaction_options(model) + # jsonify + options = json.dumps(options).replace('"', "'") cf_name = model.column_family_name() - execute("ALTER TABLE {} with compaction = {}".format(cf_name, options)) + query = "ALTER TABLE {} with compaction = {}".format(cf_name, options) + logger.debug(query) + execute(query) def delete_table(model): @@ -281,3 +286,4 @@ def drop_table(model): cf_name = model.column_family_name() execute('drop table {};'.format(cf_name)) + diff --git a/cqlengine/tests/management/test_compaction_settings.py b/cqlengine/tests/management/test_compaction_settings.py index 07b72a8170..014abcb2db 100644 --- a/cqlengine/tests/management/test_compaction_settings.py +++ b/cqlengine/tests/management/test_compaction_settings.py @@ -1,9 +1,10 @@ import copy +import json from time import sleep from mock import patch, MagicMock from cqlengine import Model, columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy from cqlengine.exceptions import CQLEngineException -from cqlengine.management import get_compaction_options, drop_table, 
sync_table +from cqlengine.management import get_compaction_options, drop_table, sync_table, get_table_settings from cqlengine.tests.base import BaseCassEngTestCase @@ -97,10 +98,11 @@ def test_alter_actually_alters(self): tmp.__compaction_sstable_size_in_mb__ = None sync_table(tmp) - table_settings = schema_columnfamilies.get(keyspace_name=tmp._get_keyspace(), - columnfamily_name=tmp.column_family_name(include_keyspace=False)) + table_settings = get_table_settings(tmp) + self.assertRegexpMatches(table_settings['compaction_strategy_class'], '.*SizeTieredCompactionStrategy$') + def test_alter_options(self): class AlterTable(Model): @@ -138,3 +140,49 @@ class CompactionSizeTieredModel(Model): __compaction__ = SizeTieredCompactionStrategy cid = columns.UUID(primary_key=True) name = columns.Text() + + + +class OptionsTest(BaseCassEngTestCase): + + def test_all_size_tiered_options(self): + class AllSizeTieredOptionsModel(Model): + __compaction__ = SizeTieredCompactionStrategy + __compaction_bucket_low__ = .3 + __compaction_bucket_high__ = 2 + __compaction_min_threshold__ = 2 + __compaction_max_threshold__ = 64 + __compaction_tombstone_compaction_interval__ = 86400 + + cid = columns.UUID(primary_key=True) + name = columns.Text() + + drop_table(AllSizeTieredOptionsModel) + sync_table(AllSizeTieredOptionsModel) + + settings = get_table_settings(AllSizeTieredOptionsModel) + options = json.loads(settings['compaction_strategy_options']) + expected = {u'min_threshold': u'2', + u'bucket_low': u'0.3', + u'tombstone_compaction_interval': u'86400', + u'bucket_high': u'2', + u'max_threshold': u'64'} + self.assertDictEqual(options, expected) + + + def test_all_leveled_options(self): + + class AllLeveledOptionsModel(Model): + __compaction__ = LeveledCompactionStrategy + __compaction_sstable_size_in_mb__ = 64 + + cid = columns.UUID(primary_key=True) + name = columns.Text() + + drop_table(AllLeveledOptionsModel) + sync_table(AllLeveledOptionsModel) + + settings = 
get_table_settings(AllLeveledOptionsModel) + options = json.loads(settings['compaction_strategy_options']) + self.assertDictEqual(options, {u'sstable_size_in_mb': u'64'}) + diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 86e0840d73..4d9ab98a55 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -21,11 +21,11 @@ This example defines a Person table, with the columns ``first_name`` and ``last_ from cqlengine import columns from cqlengine.models import Model - + class Person(Model): first_name = columns.Text() last_name = columns.Text() - + The Person model would create this CQL table: @@ -83,7 +83,7 @@ Column Options :attr:`~cqlengine.columns.BaseColumn.index` If True, an index will be created for this column. Defaults to False. - + *Note: Indexes can only be created on models with one primary key* :attr:`~cqlengine.columns.BaseColumn.db_field` @@ -109,7 +109,7 @@ Model Methods *Example* .. code-block:: python - + #using the person model from earlier: class Person(Model): first_name = columns.Text() @@ -118,7 +118,7 @@ Model Methods person = Person(first_name='Blake', last_name='Eggleston') person.first_name #returns 'Blake' person.last_name #returns 'Eggleston' - + .. method:: save() @@ -135,7 +135,7 @@ Model Methods .. method:: delete() - + Deletes the object from the database. Model Attributes @@ -153,3 +153,41 @@ Model Attributes *Optional.* Sets the name of the keyspace used by this model. Defaulst to cqlengine + +Compaction Options +==================== + + As of cqlengine 0.6 we've added support for specifying compaction options. cqlengine will only use your compaction options if you have a strategy set. When a table is synced, it will be altered to match the compaction options set on your table. This means that if you are changing settings manually they will be changed back on resync. Do not use the compaction settings of cqlengine if you want to manage your compaction settings manually. 
+ + cqlengine supports all compaction options as of Cassandra 1.2.8. + + Tables may either be + .. attribute:: Model.__compaction_bucket_high__ + + .. attribute:: Model.__compaction_bucket_low__ + + .. attribute:: Model.__compaction_max_compaction_threshold__ + + .. attribute:: Model.__compaction_min_compaction_threshold__ + + .. attribute:: Model.__compaction_min_sstable_size__ + + .. attribute:: Model.__compaction_sstable_size_in_mb__ + + .. attribute:: Model.__compaction_tombstone_compaction_interval__ + + .. attribute:: Model.__compaction_tombstone_threshold__ + + For example: + + .. code-block::python + + class User(Model): + __compaction__ = LeveledCompactionStrategy + __compaction_sstable_size_in_mb__ = 64 + __compaction_tombstone_threshold__ = .2 + + user_id = columns.UUID(primary_key=True) + name = columns.Text() + + From 4c53e3e2e40697e390d8b6d5ce73249d85687708 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 20 Aug 2013 13:23:52 -0700 Subject: [PATCH 0359/3726] updated version --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index a918a2aa18..faef31a435 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.6.0 +0.7.0 From f2f9230b5821990330003d5ed0757fdba8ccbb66 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 20 Aug 2013 13:32:55 -0700 Subject: [PATCH 0360/3726] fixed docs --- docs/topics/models.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 4d9ab98a55..093f1cdd95 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -157,7 +157,7 @@ Model Attributes Compaction Options ==================== - As of cqlengine 0.6 we've added support for specifying compaction options. cqlengine will only use your compaction options if you have a strategy set. When a table is synced, it will be altered to match the compaction options set on your table. 
This means that if you are changing settings manually they will be changed back on resync. Do not use the compaction settings of cqlengine if you want to manage your compaction settings manually. + As of cqlengine 0.7 we've added support for specifying compaction options. cqlengine will only use your compaction options if you have a strategy set. When a table is synced, it will be altered to match the compaction options set on your table. This means that if you are changing settings manually they will be changed back on resync. Do not use the compaction settings of cqlengine if you want to manage your compaction settings manually. cqlengine supports all compaction options as of Cassandra 1.2.8. @@ -183,7 +183,7 @@ Compaction Options .. code-block::python class User(Model): - __compaction__ = LeveledCompactionStrategy + __compaction__ = cqlengine.LeveledCompactionStrategy __compaction_sstable_size_in_mb__ = 64 __compaction_tombstone_threshold__ = .2 @@ -191,3 +191,4 @@ Compaction Options name = columns.Text() + Tables may use LeveledCompactionStrategy or SizeTieredCompactionStrategy. Both options are available in the top level cqlengine module. From 705b18968f6d02c066b4126b0b8ef6a62fd2efb6 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 20 Aug 2013 13:35:08 -0700 Subject: [PATCH 0361/3726] fixing more docs errors --- docs/topics/models.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 093f1cdd95..551056b55d 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -180,7 +180,7 @@ Compaction Options For example: - .. code-block::python + .. 
code-block:: python class User(Model): __compaction__ = cqlengine.LeveledCompactionStrategy From 2abdbb52454013c3a6ef7795eedbf252da97a0f1 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 20 Aug 2013 13:38:41 -0700 Subject: [PATCH 0362/3726] improving docs --- docs/topics/models.rst | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 551056b55d..17e7cbdcfc 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -190,5 +190,16 @@ Compaction Options user_id = columns.UUID(primary_key=True) name = columns.Text() + or for SizeTieredCompaction: - Tables may use LeveledCompactionStrategy or SizeTieredCompactionStrategy. Both options are available in the top level cqlengine module. + .. code-block:: python + + class TimeData(Model): + __compaction__ = SizeTieredCompactionStrategy + __compaction_bucket_low__ = .3 + __compaction_bucket_high__ = 2 + __compaction_min_threshold__ = 2 + __compaction_max_threshold__ = 64 + __compaction_tombstone_compaction_interval__ = 86400 + + Tables may use `LeveledCompactionStrategy` or `SizeTieredCompactionStrategy`. Both options are available in the top level cqlengine module. To reiterate, you will need to set your `__compaction__` option explicitly in order for cqlengine to handle any of your settings. 
From dc869c4916d636d93aebf3863dee13f13672f106 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 20 Aug 2013 13:40:37 -0700 Subject: [PATCH 0363/3726] updated readme and index to use sync_table rather than create_table --- README.md | 4 ++-- docs/index.rst | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 09e2014241..a1242e34fc 100644 --- a/README.md +++ b/README.md @@ -37,8 +37,8 @@ class ExampleModel(Model): >>> connection.setup(['127.0.0.1:9160']) #...and create your CQL table ->>> from cqlengine.management import create_table ->>> create_table(ExampleModel) +>>> from cqlengine.management import sync_table +>>> sync_table(ExampleModel) #now we can create some rows: >>> em1 = ExampleModel.create(example_type=0, description="example1", created_at=datetime.now()) diff --git a/docs/index.rst b/docs/index.rst index 1122599cca..b97360ebe6 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -17,7 +17,7 @@ Contents: .. toctree:: :maxdepth: 2 - + topics/models topics/queryset topics/columns @@ -46,8 +46,8 @@ Getting Started >>> connection.setup(['127.0.0.1:9160']) #...and create your CQL table - >>> from cqlengine.management import create_table - >>> create_table(ExampleModel) + >>> from cqlengine.management import sync_table + >>> sync_table(ExampleModel) #now we can create some rows: >>> em1 = ExampleModel.create(example_type=0, description="example1", created_at=datetime.now()) From 66930a00d96c0a9fee624efc00dee82036c3f4bb Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 20 Aug 2013 13:44:15 -0700 Subject: [PATCH 0364/3726] fixed author --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 5fd441428a..944d41191f 100644 --- a/setup.py +++ b/setup.py @@ -35,7 +35,7 @@ ], keywords='cassandra,cql,orm', install_requires = ['cql'], - author='Blake Eggleston', + author='Blake Eggleston, Jon Haddad', author_email='bdeggleston@gmail.com', 
url='https://github.com/cqlengine/cqlengine', license='BSD', From 9c5a0104f56beacee91e953173e246d1710f4c2b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 20 Aug 2013 14:25:58 -0700 Subject: [PATCH 0365/3726] fixing docs --- docs/topics/models.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 17e7cbdcfc..961a866be9 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -161,7 +161,8 @@ Compaction Options cqlengine supports all compaction options as of Cassandra 1.2.8. - Tables may either be + Available Options: + .. attribute:: Model.__compaction_bucket_high__ .. attribute:: Model.__compaction_bucket_low__ From e6cb306771defd290ca1f21562f27fce29b1d0e4 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 20 Aug 2013 22:16:45 -0700 Subject: [PATCH 0366/3726] column docs --- docs/topics/columns.rst | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/docs/topics/columns.rst b/docs/topics/columns.rst index d323ac216a..daffa28a50 100644 --- a/docs/topics/columns.rst +++ b/docs/topics/columns.rst @@ -18,9 +18,9 @@ Columns .. class:: Ascii() Stores a US-ASCII character string :: - + columns.Ascii() - + .. class:: Text() @@ -93,15 +93,24 @@ Columns columns.Decimal() +.. class:: Counter() + + Counters can be incremented and decremented. + + ..code:: python + + columns.Counter() + + Collection Type Columns ---------------------------- CQLEngine also supports container column types. Each container column requires a column class argument to specify what type of objects it will hold. The Map column requires 2, one for the key, and the other for the value - + *Example* .. 
code-block:: python - + class Person(Model): id = columns.UUID(primary_key=True, default=uuid.uuid4) first_name = columns.Text() @@ -111,7 +120,7 @@ Collection Type Columns enemies = columns.Set(columns.Text) todo_list = columns.List(columns.Text) birthdays = columns.Map(columns.Text, columns.DateTime) - + .. class:: Set() From 9cbe8d7268797fd7c34495eaf9e9aec2fb0e2384 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 20 Aug 2013 22:17:34 -0700 Subject: [PATCH 0367/3726] typo --- docs/topics/columns.rst | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/docs/topics/columns.rst b/docs/topics/columns.rst index daffa28a50..5ee9a18686 100644 --- a/docs/topics/columns.rst +++ b/docs/topics/columns.rst @@ -95,11 +95,9 @@ Columns .. class:: Counter() - Counters can be incremented and decremented. + Counters can be incremented and decremented :: - ..code:: python - - columns.Counter() + columns.Counter() Collection Type Columns From 7354b5d646b3eb31dfc9a7173250c76472fdec3c Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 22 Aug 2013 15:26:41 -0700 Subject: [PATCH 0368/3726] adding dev requirements file --- requirements-dev.txt | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 requirements-dev.txt diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000000..2773a99fe4 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,6 @@ +nose +nose-progressive +profilestats +pycallgraph +ipdbplugin==1.2 +ipdb==0.7 From 70f2c95c90e03ed04e5380190e394e9c8b8791c1 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 22 Aug 2013 15:29:29 -0700 Subject: [PATCH 0369/3726] splitting up instance constructor methods to make defining custom instantiation methods easier --- cqlengine/query.py | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 194b4c9330..4ad557d59a 100644 --- a/cqlengine/query.py +++ 
b/cqlengine/query.py @@ -352,7 +352,7 @@ def _execute_query(self): raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") if self._result_cache is None: columns, self._result_cache = execute(self._select_query(), self._where_values()) - self._construct_result = self._create_result_constructor(columns) + self._construct_result = self._get_result_constructor(columns) def _fill_result_cache_to_idx(self, idx): self._execute_query() @@ -408,7 +408,7 @@ def __getitem__(self, s): self._fill_result_cache_to_idx(s) return self._result_cache[s] - def _create_result_constructor(self, names): + def _get_result_constructor(self, names): """ Returns a function that will be used to instantiate query results """ @@ -650,7 +650,7 @@ def _get_select_statement(self): """ Returns the fields to be returned by the select query """ return 'SELECT *' - def _create_result_constructor(self, names): + def _get_result_constructor(self, names): """ Returns a function that will be used to instantiate query results """ @@ -697,26 +697,27 @@ def _get_select_statement(self): db_fields = [self.model._columns[f].db_field_name for f in fields] return 'SELECT {}'.format(', '.join(['"{}"'.format(f) for f in db_fields])) - def _create_result_constructor(self, names): - """ - Returns a function that will be used to instantiate query results - """ + def _get_instance_constructor(self, names): + """ returns a function used to construct model instances """ model = self.model db_map = model._db_map + def _construct_instance(values): + field_dict = dict((db_map.get(k, k), v) for k, v in zip(names, values)) + instance = model(**field_dict) + instance._is_persisted = True + return instance + return _construct_instance + + def _get_result_constructor(self, names): + """ Returns a function that will be used to instantiate query results """ if not self._values_list: - def _construct_instance(values): - field_dict = dict((db_map.get(k, k), v) for k, v in zip(names, values)) - 
instance = model(**field_dict) - instance._is_persisted = True - return instance - return _construct_instance - - columns = [model._columns[n] for n in names] - if self._flat_values_list: - return (lambda values: columns[0].to_python(values[0])) + return self._get_instance_constructor(names) else: - # result_cls = namedtuple("{}Tuple".format(self.model.__name__), names) - return (lambda values: map(lambda (c, v): c.to_python(v), zip(columns, values))) + columns = [self.model._columns[n] for n in names] + if self._flat_values_list: + return lambda values: columns[0].to_python(values[0]) + else: + return lambda values: map(lambda (c, v): c.to_python(v), zip(columns, values)) def _get_ordering_condition(self, colname): colname, order_type = super(ModelQuerySet, self)._get_ordering_condition(colname) From f70973a90a07b007c8038756dad32446b5439eef Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 22 Aug 2013 15:32:23 -0700 Subject: [PATCH 0370/3726] updating --- changelog | 3 +++ cqlengine/VERSION | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/changelog b/changelog index 6a72c4bd18..8dd2fe8608 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,8 @@ CHANGELOG +0.7.1 +* refactoring query class to make defining custom model instantiation logic easier + 0.7.0 * added counter columns * added support for compaction settings at the model level diff --git a/cqlengine/VERSION b/cqlengine/VERSION index faef31a435..39e898a4f9 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.7.0 +0.7.1 From d2b86bb31aefc23a37ab310c7e79e17943beb573 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 22 Aug 2013 15:44:18 -0700 Subject: [PATCH 0371/3726] removing table_name inheritance short circuit, which wasn't working anyway --- cqlengine/models.py | 3 --- cqlengine/tests/model/test_class_construction.py | 4 ---- 2 files changed, 7 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 18a49f7f7b..3212d67244 100644 --- 
a/cqlengine/models.py +++ b/cqlengine/models.py @@ -401,9 +401,6 @@ def _transform_column(col_name, col_obj): for field_name, col in column_dict.items(): db_map[col.db_field_name] = field_name - #short circuit table_name inheritance - attrs['table_name'] = attrs.get('__table_name__') - #add management members to the class attrs['_columns'] = column_dict attrs['_primary_keys'] = primary_keys diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index 6653946bea..6ae46753ec 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -221,10 +221,6 @@ def test_proper_table_naming(self): assert self.RenamedTest.column_family_name(include_keyspace=False) == 'manual_name' assert self.RenamedTest.column_family_name(include_keyspace=True) == 'whatever.manual_name' - def test_manual_table_name_is_not_inherited(self): - class InheritedTest(self.RenamedTest): pass - assert InheritedTest.table_name is None - class AbstractModel(Model): __abstract__ = True From 4c0648343c161a54d0133968328d18087c3b927b Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 30 Aug 2013 15:22:34 -0700 Subject: [PATCH 0372/3726] adding polymorphic_key arg to base column --- cqlengine/columns.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 9940f1aa18..8834b1db7f 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -86,7 +86,8 @@ def __init__(self, db_field=None, default=None, required=False, - clustering_order=None): + clustering_order=None, + polymorphic_key=False): """ :param primary_key: bool flag, indicates this column is a primary key. 
The first primary key defined on a model is the partition key (unless partition keys are set), all others are cluster keys @@ -99,6 +100,8 @@ def __init__(self, exception if required is set to True and there is a None value assigned :param clustering_order: only applicable on clustering keys (primary keys that are not partition keys) determines the order that the clustering keys are sorted on disk + :param polymorphic_key: boolean, if set to True, this column will be used for saving and loading instances + of polymorphic tables """ self.partition_key = partition_key self.primary_key = partition_key or primary_key @@ -107,6 +110,7 @@ def __init__(self, self.default = default self.required = required self.clustering_order = clustering_order + self.polymorphic_key = polymorphic_key #the column name in the model definition self.column_name = None From 54d610ec20d867f19c61918c315ca6fee421e625 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 3 Sep 2013 10:25:53 -0700 Subject: [PATCH 0373/3726] adding polymorphic config to model metaclass --- cqlengine/models.py | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 3212d67244..f43f107428 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -10,6 +10,9 @@ class ModelDefinitionException(ModelException): pass + +class PolyMorphicModelException(ModelException): pass + DEFAULT_KEYSPACE = 'cqlengine' @@ -142,6 +145,8 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass #the keyspace for this model __keyspace__ = None + #polymorphism options + __polymorphic_key__ = None # compaction options __compaction__ = None @@ -282,6 +287,14 @@ def get(cls, *args, **kwargs): return cls.objects.get(*args, **kwargs) def save(self): + + # handle polymorphic models + if self._is_polymorphic: + if self._is_polymorphic_base: + raise PolyMorphicModelException('cannot save polymorphic base model') + else: + setattr(self, 
self._polymorphic_column_name, self.__polymorphic_key__) + is_new = self.pk is None self.validate() self.__dmlquery__(self.__class__, self, batch=self._batch).save() @@ -328,6 +341,9 @@ def __new__(cls, name, bases, attrs): #short circuit __abstract__ inheritance is_abstract = attrs['__abstract__'] = attrs.get('__abstract__', False) + #short circuit __polymorphic_key__ inheritance + attrs['__polymorphic_key__'] = attrs.get('__polymorphic_key__', None) + def _transform_column(col_name, col_obj): column_dict[col_name] = col_obj if col_obj.primary_key: @@ -339,11 +355,20 @@ def _transform_column(col_name, col_obj): column_definitions = [(k,v) for k,v in attrs.items() if isinstance(v, columns.Column)] column_definitions = sorted(column_definitions, lambda x,y: cmp(x[1].position, y[1].position)) + polymorphic_base = any([c[1].polymorphic_key for c in column_definitions]) + column_definitions = inherited_columns.items() + column_definitions + polymorphic_columns = [c for c in column_definitions if c[1].polymorphic_key] + is_polymorphic = len(polymorphic_columns) > 0 + if len(polymorphic_columns) > 1: + raise ModelDefinitionException('only one polymorphic_key can be defined in a model, {} found'.format(len(polymorphic_columns))) + + polymorphic_column_name, polymorphic_column = polymorphic_columns[0] if polymorphic_columns else (None, None) + defined_columns = OrderedDict(column_definitions) - #prepend primary key if one hasn't been defined + # check for primary key if not is_abstract and not any([v.primary_key for k,v in column_definitions]): raise ModelDefinitionException("At least 1 primary key is required.") @@ -356,7 +381,7 @@ def _transform_column(col_name, col_obj): #TODO: check that the defined columns don't conflict with any of the Model API's existing attributes/methods #transform column definitions - for k,v in column_definitions: + for k, v in column_definitions: # counter column primary keys are not allowed if (v.primary_key or v.partition_key) and 
isinstance(v, (columns.Counter, columns.BaseContainerColumn)): raise ModelDefinitionException('counter columns and container columns cannot be used as primary keys') @@ -413,6 +438,12 @@ def _transform_column(col_name, col_obj): attrs['_clustering_keys'] = clustering_keys attrs['_has_counter'] = len(counter_columns) > 0 + # add polymorphic management attributes + attrs['_is_polymorphic_base'] = polymorphic_base + attrs['_is_polymorphic'] = is_polymorphic + attrs['_polymorphic_column'] = polymorphic_column + attrs['_polymorphic_column_name'] = polymorphic_column_name + #setup class exceptions DoesNotExistBase = None for base in bases: From 0c380402954df642d4462749bb6dfb5cfbce38ab Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 3 Sep 2013 10:26:05 -0700 Subject: [PATCH 0374/3726] adding tests around polymorphic models --- cqlengine/tests/model/test_polymorphism.py | 110 +++++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 cqlengine/tests/model/test_polymorphism.py diff --git a/cqlengine/tests/model/test_polymorphism.py b/cqlengine/tests/model/test_polymorphism.py new file mode 100644 index 0000000000..2b42f556f5 --- /dev/null +++ b/cqlengine/tests/model/test_polymorphism.py @@ -0,0 +1,110 @@ +import uuid + +from cqlengine import columns +from cqlengine import models +from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine import management + + +class TestPolymorphicClassConstruction(BaseCassEngTestCase): + + def test_multiple_polymorphic_key_failure(self): + """ Tests that defining a model with more than one polymorphic key fails """ + with self.assertRaises(models.ModelDefinitionException): + class M(models.Model): + partition = columns.Integer(primary_key=True) + type1 = columns.Integer(polymorphic_key=True) + type2 = columns.Integer(polymorphic_key=True) + + def test_polymorphic_key_inheritance(self): + """ Tests that polymorphic_key attribute is not inherited """ + class Base(models.Model): + partition = 
columns.Integer(primary_key=True) + type1 = columns.Integer(polymorphic_key=True) + + class M1(Base): + __polymorphic_key__ = 1 + + class M2(M1): + pass + + assert M2.__polymorphic_key__ is None + + def test_polymorphic_metaclass(self): + """ Tests that the model meta class configures polymorphic models properly """ + class Base(models.Model): + partition = columns.Integer(primary_key=True) + type1 = columns.Integer(polymorphic_key=True) + + class M1(Base): + __polymorphic_key__ = 1 + + assert Base._is_polymorphic + assert M1._is_polymorphic + + assert Base._is_polymorphic_base + assert not M1._is_polymorphic_base + + assert Base._polymorphic_column == Base.type1 + assert M1._polymorphic_column == M1.type1 + + assert Base._polymorphic_column_name == 'type1' + assert M1._polymorphic_column_name == 'type1' + + +class PolyBase(models.Model): + partition = columns.UUID(primary_key=True, default=uuid.uuid4) + row_type = columns.Integer(polymorphic_key=True) + + +class Poly1(PolyBase): + __polymorphic_key__ = 1 + data1 = columns.Text() + + +class Poly2(PolyBase): + __polymorphic_key__ = 2 + data2 = columns.Text() + + +class TestPolymorphicModel(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestPolymorphicModel, cls).setUpClass() + management.sync_table(Poly1) + management.sync_table(Poly2) + + @classmethod + def tearDownClass(cls): + super(TestPolymorphicModel, cls).tearDownClass() + management.drop_table(Poly1) + management.drop_table(Poly2) + + def test_saving_base_model_fails(self): + with self.assertRaises(models.PolyMorphicModelException): + PolyBase.create(partition=1) + + def test_saving_subclass_saves_poly_key(self): + pass + + +class TestIndexedPolymorphicQuery(BaseCassEngTestCase): + + def test_success_case(self): + pass + + def test_polymorphic_key_is_added_to_queries(self): + pass + + +class TestUnindexedPolymorphicQuery(BaseCassEngTestCase): + + def test_non_conflicting_type_results_work(self): + pass + + def 
test_conflicting_type_results(self): + pass + + def test_allow_filtering_filters_types(self): + pass From 94c7578b0b2c1dc180a5041fa4b847df55fa4ede Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 3 Sep 2013 12:00:11 -0700 Subject: [PATCH 0375/3726] moving instance construction onto the model --- cqlengine/models.py | 29 +++++++++++++++++++++++++++++ cqlengine/query.py | 16 +--------------- 2 files changed, 30 insertions(+), 15 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index f43f107428..7d84a3c889 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -189,6 +189,21 @@ def __init__(self, **values): self._is_persisted = False self._batch = None + @classmethod + def _construct_instance(cls, names, values): + """ + method used to construct instances from query results + + :param cls: + :param names: + :param values: + :return: + """ + field_dict = dict((cls._db_map.get(k, k), v) for k, v in zip(names, values)) + instance = cls(**field_dict) + instance._is_persisted = True + return instance + def _can_update(self): """ Called by the save function to check if this should be @@ -216,6 +231,16 @@ def _get_column(cls, name): """ return cls._columns[name] + @classmethod + def _get_polymorphic_base(cls): + if cls._is_polymorphic: + if cls._is_polymorphic_base: + return cls + for base in cls.__bases__: + klass = base._get_polymorphic_base() + if klass is not None: + return klass + def __eq__(self, other): if self.__class__ != other.__class__: return False @@ -246,6 +271,10 @@ def column_family_name(cls, include_keyspace=True): if cls.__table_name__: cf_name = cls.__table_name__.lower() else: + # get polymorphic base table names if model is polymorphic + if cls._is_polymorphic and not cls._is_polymorphic_base: + return cls._get_polymorphic_base().column_family_name(include_keyspace=include_keyspace) + camelcase = re.compile(r'([a-z])([A-Z])') ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s) 
diff --git a/cqlengine/query.py b/cqlengine/query.py index 4ad557d59a..9bb1a31a33 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -679,9 +679,6 @@ def _validate_where_syntax(self): if any(not w.column.partition_key for w in token_ops): raise QueryException('The token() function is only supported on the partition key') - - #TODO: abuse this to see if we can get cql to raise an exception - def _where_clause(self): """ Returns a where clause based on the given filter args """ self._validate_where_syntax() @@ -697,21 +694,10 @@ def _get_select_statement(self): db_fields = [self.model._columns[f].db_field_name for f in fields] return 'SELECT {}'.format(', '.join(['"{}"'.format(f) for f in db_fields])) - def _get_instance_constructor(self, names): - """ returns a function used to construct model instances """ - model = self.model - db_map = model._db_map - def _construct_instance(values): - field_dict = dict((db_map.get(k, k), v) for k, v in zip(names, values)) - instance = model(**field_dict) - instance._is_persisted = True - return instance - return _construct_instance - def _get_result_constructor(self, names): """ Returns a function that will be used to instantiate query results """ if not self._values_list: - return self._get_instance_constructor(names) + return lambda values: self.model._construct_instance(names, values) else: columns = [self.model._columns[n] for n in names] if self._flat_values_list: From ba476f8d6dce244d2f0cb5581c027140582f48ce Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 3 Sep 2013 12:09:17 -0700 Subject: [PATCH 0376/3726] moving polymorphic base discovery into model meta class --- cqlengine/models.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 7d84a3c889..70ecf05a2c 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -231,16 +231,6 @@ def _get_column(cls, name): """ return cls._columns[name] - @classmethod - 
def _get_polymorphic_base(cls): - if cls._is_polymorphic: - if cls._is_polymorphic_base: - return cls - for base in cls.__bases__: - klass = base._get_polymorphic_base() - if klass is not None: - return klass - def __eq__(self, other): if self.__class__ != other.__class__: return False @@ -273,7 +263,7 @@ def column_family_name(cls, include_keyspace=True): else: # get polymorphic base table names if model is polymorphic if cls._is_polymorphic and not cls._is_polymorphic_base: - return cls._get_polymorphic_base().column_family_name(include_keyspace=include_keyspace) + return cls._polymorphic_base.column_family_name(include_keyspace=include_keyspace) camelcase = re.compile(r'([a-z])([A-Z])') ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s) @@ -384,7 +374,7 @@ def _transform_column(col_name, col_obj): column_definitions = [(k,v) for k,v in attrs.items() if isinstance(v, columns.Column)] column_definitions = sorted(column_definitions, lambda x,y: cmp(x[1].position, y[1].position)) - polymorphic_base = any([c[1].polymorphic_key for c in column_definitions]) + is_polymorphic_base = any([c[1].polymorphic_key for c in column_definitions]) column_definitions = inherited_columns.items() + column_definitions @@ -395,6 +385,18 @@ def _transform_column(col_name, col_obj): polymorphic_column_name, polymorphic_column = polymorphic_columns[0] if polymorphic_columns else (None, None) + # find polymorphic base class + polymorphic_base = None + if is_polymorphic and not is_polymorphic_base: + def _get_polymorphic_base(bases): + for base in bases: + if getattr(base, '_is_polymorphic_base', False): + return base + klass = _get_polymorphic_base(base.__bases__) + if klass: + return klass + polymorphic_base = _get_polymorphic_base(bases) + defined_columns = OrderedDict(column_definitions) # check for primary key @@ -468,8 +470,9 @@ def _transform_column(col_name, col_obj): attrs['_has_counter'] = len(counter_columns) > 0 # add polymorphic 
management attributes - attrs['_is_polymorphic_base'] = polymorphic_base + attrs['_is_polymorphic_base'] = is_polymorphic_base attrs['_is_polymorphic'] = is_polymorphic + attrs['_polymorphic_base'] = polymorphic_base attrs['_polymorphic_column'] = polymorphic_column attrs['_polymorphic_column_name'] = polymorphic_column_name From 2fc5350289c87b6db8e9ea313d505c238c348181 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 3 Sep 2013 12:25:06 -0700 Subject: [PATCH 0377/3726] adding support for polymorphic model deserialization --- cqlengine/models.py | 53 +++++++++++++++++++--- cqlengine/tests/model/test_polymorphism.py | 28 +++++++++++- 2 files changed, 73 insertions(+), 8 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 70ecf05a2c..57f922ed41 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -189,18 +189,58 @@ def __init__(self, **values): self._is_persisted = False self._batch = None + @classmethod + def _discover_polymorphic_submodels(cls): + if not cls._is_polymorphic_base: + raise ModelException('_discover_polymorphic_submodels can only be called on polymorphic base classes') + def _discover(klass): + if not klass._is_polymorphic_base and klass.__polymorphic_key__ is not None: + cls._polymorphic_map[klass.__polymorphic_key__] = klass + for subklass in klass.__subclasses__(): + _discover(subklass) + _discover(cls) + + @classmethod + def _get_model_by_polymorphic_key(cls, key): + if not cls._is_polymorphic_base: + raise ModelException('_get_model_by_polymorphic_key can only be called on polymorphic base classes') + return cls._polymorphic_map.get(key) + @classmethod def _construct_instance(cls, names, values): """ method used to construct instances from query results - - :param cls: - :param names: - :param values: - :return: + this is where polymorphic deserialization occurs """ field_dict = dict((cls._db_map.get(k, k), v) for k, v in zip(names, values)) - instance = cls(**field_dict) + if cls._is_polymorphic: + 
poly_key = field_dict.get(cls._polymorphic_column_name) + + if poly_key is None: + raise PolyMorphicModelException('polymorphic key was not found in values') + + poly_base = cls if cls._is_polymorphic_base else cls._polymorphic_base + + klass = poly_base._get_model_by_polymorphic_key(poly_key) + if klass is None: + poly_base._discover_polymorphic_submodels() + klass = poly_base._get_model_by_polymorphic_key(poly_key) + if klass is None: + raise PolyMorphicModelException( + 'unrecognized polymorphic key {} for class {}'.format(poly_key, poly_base.__name__) + ) + + if not issubclass(klass, poly_base): + raise PolyMorphicModelException( + '{} is not a subclass of {}'.format(klass.__name__, poly_base.__name__) + ) + + field_dict = {k: v for k, v in field_dict.items() if k in klass._columns.keys()} + + else: + klass = cls + + instance = klass(**field_dict) instance._is_persisted = True return instance @@ -475,6 +515,7 @@ def _get_polymorphic_base(bases): attrs['_polymorphic_base'] = polymorphic_base attrs['_polymorphic_column'] = polymorphic_column attrs['_polymorphic_column_name'] = polymorphic_column_name + attrs['_polymorphic_map'] = {} if is_polymorphic_base else None #setup class exceptions DoesNotExistBase = None diff --git a/cqlengine/tests/model/test_polymorphism.py b/cqlengine/tests/model/test_polymorphism.py index 2b42f556f5..7fc5275ae3 100644 --- a/cqlengine/tests/model/test_polymorphism.py +++ b/cqlengine/tests/model/test_polymorphism.py @@ -51,6 +51,16 @@ class M1(Base): assert Base._polymorphic_column_name == 'type1' assert M1._polymorphic_column_name == 'type1' + def test_table_names_are_inherited_from_poly_base(self): + class Base(models.Model): + partition = columns.Integer(primary_key=True) + type1 = columns.Integer(polymorphic_key=True) + + class M1(Base): + __polymorphic_key__ = 1 + + assert Base.column_family_name() == M1.column_family_name() + class PolyBase(models.Model): partition = columns.UUID(primary_key=True, default=uuid.uuid4) @@ -83,10 
+93,24 @@ def tearDownClass(cls): def test_saving_base_model_fails(self): with self.assertRaises(models.PolyMorphicModelException): - PolyBase.create(partition=1) + PolyBase.create() def test_saving_subclass_saves_poly_key(self): - pass + p1 = Poly1.create(data1='pickle') + p2 = Poly2.create(data2='bacon') + + assert p1.row_type == Poly1.__polymorphic_key__ + assert p2.row_type == Poly2.__polymorphic_key__ + + def test_query_deserialization(self): + p1 = Poly1.create(data1='pickle') + p2 = Poly2.create(data2='bacon') + + p1r = PolyBase.get(partition=p1.partition) + p2r = PolyBase.get(partition=p2.partition) + + assert isinstance(p1r, Poly1) + assert isinstance(p2r, Poly2) class TestIndexedPolymorphicQuery(BaseCassEngTestCase): From 2984ba71634e5c3d4b23bb42a977401ca60ffc01 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 3 Sep 2013 15:35:28 -0700 Subject: [PATCH 0378/3726] adding tests around unindexed polymorphic columns --- cqlengine/models.py | 2 +- cqlengine/tests/model/test_polymorphism.py | 65 +++++++++++++++++++--- 2 files changed, 59 insertions(+), 8 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 57f922ed41..8a7d58f8ea 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -230,7 +230,7 @@ def _construct_instance(cls, names, values): 'unrecognized polymorphic key {} for class {}'.format(poly_key, poly_base.__name__) ) - if not issubclass(klass, poly_base): + if not issubclass(klass, cls): raise PolyMorphicModelException( '{} is not a subclass of {}'.format(klass.__name__, poly_base.__name__) ) diff --git a/cqlengine/tests/model/test_polymorphism.py b/cqlengine/tests/model/test_polymorphism.py index 7fc5275ae3..6dece75fd8 100644 --- a/cqlengine/tests/model/test_polymorphism.py +++ b/cqlengine/tests/model/test_polymorphism.py @@ -113,22 +113,73 @@ def test_query_deserialization(self): assert isinstance(p2r, Poly2) -class TestIndexedPolymorphicQuery(BaseCassEngTestCase): +class UnindexedPolyBase(models.Model): + 
partition = columns.UUID(primary_key=True, default=uuid.uuid4) + cluster = columns.UUID(primary_key=True, default=uuid.uuid4) + row_type = columns.Integer(polymorphic_key=True) - def test_success_case(self): - pass - def test_polymorphic_key_is_added_to_queries(self): - pass +class UnindexedPoly1(UnindexedPolyBase): + __polymorphic_key__ = 1 + data1 = columns.Text() + + +class UnindexedPoly2(UnindexedPolyBase): + __polymorphic_key__ = 2 + data2 = columns.Text() class TestUnindexedPolymorphicQuery(BaseCassEngTestCase): + @classmethod + def setUpClass(cls): + super(TestUnindexedPolymorphicQuery, cls).setUpClass() + management.sync_table(UnindexedPoly1) + management.sync_table(UnindexedPoly2) + + cls.p1 = UnindexedPoly1.create(data1='pickle') + cls.p2 = UnindexedPoly2.create(partition=cls.p1.partition, data2='bacon') + + @classmethod + def tearDownClass(cls): + super(TestUnindexedPolymorphicQuery, cls).tearDownClass() + management.drop_table(UnindexedPoly1) + management.drop_table(UnindexedPoly2) + def test_non_conflicting_type_results_work(self): - pass + assert len([p for p in UnindexedPoly1.objects(partition=self.p1.partition, cluster=self.p1.cluster)]) == 1 + assert len([p for p in UnindexedPoly2.objects(partition=self.p1partition, cluster=self.p2.cluster)]) == 1 def test_conflicting_type_results(self): - pass + with self.assertRaises(models.PolyMorphicModelException): + [p for p in UnindexedPoly1.objects(partition=self.p1.partition)] + with self.assertRaises(models.PolyMorphicModelException): + [p for p in UnindexedPoly2.objects(partition=self.p1.partition)] def test_allow_filtering_filters_types(self): pass + + +class IndexedPolyBase(models.Model): + partition = columns.UUID(primary_key=True, default=uuid.uuid4) + row_type = columns.Integer(polymorphic_key=True, index=True) + + +class IndexedPoly1(IndexedPolyBase): + __polymorphic_key__ = 1 + data1 = columns.Text() + + +class IndexedPoly2(IndexedPolyBase): + __polymorphic_key__ = 2 + data2 = columns.Text() + + 
+class TestIndexedPolymorphicQuery(BaseCassEngTestCase): + + def test_success_case(self): + pass + + def test_polymorphic_key_is_added_to_queries(self): + pass + From 5dc9e971267c2072c60ae9271c6e230813f72e15 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 3 Sep 2013 16:29:38 -0700 Subject: [PATCH 0379/3726] finishing up the unindexed polymorphic key tests --- cqlengine/models.py | 2 +- cqlengine/tests/model/test_polymorphism.py | 32 +++++++++++++++++----- 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 8a7d58f8ea..0d94e08cbf 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -232,7 +232,7 @@ def _construct_instance(cls, names, values): if not issubclass(klass, cls): raise PolyMorphicModelException( - '{} is not a subclass of {}'.format(klass.__name__, poly_base.__name__) + '{} is not a subclass of {}'.format(klass.__name__, cls.__name__) ) field_dict = {k: v for k, v in field_dict.items() if k in klass._columns.keys()} diff --git a/cqlengine/tests/model/test_polymorphism.py b/cqlengine/tests/model/test_polymorphism.py index 6dece75fd8..d24ac34044 100644 --- a/cqlengine/tests/model/test_polymorphism.py +++ b/cqlengine/tests/model/test_polymorphism.py @@ -61,6 +61,9 @@ class M1(Base): assert Base.column_family_name() == M1.column_family_name() + def test_collection_columns_cant_be_polymorphic_keys(self): + pass + class PolyBase(models.Model): partition = columns.UUID(primary_key=True, default=uuid.uuid4) @@ -129,6 +132,11 @@ class UnindexedPoly2(UnindexedPolyBase): data2 = columns.Text() +class UnindexedPoly3(UnindexedPoly2): + __polymorphic_key__ = 3 + data3 = columns.Text() + + class TestUnindexedPolymorphicQuery(BaseCassEngTestCase): @classmethod @@ -136,28 +144,33 @@ def setUpClass(cls): super(TestUnindexedPolymorphicQuery, cls).setUpClass() management.sync_table(UnindexedPoly1) management.sync_table(UnindexedPoly2) + management.sync_table(UnindexedPoly3) cls.p1 = 
UnindexedPoly1.create(data1='pickle') cls.p2 = UnindexedPoly2.create(partition=cls.p1.partition, data2='bacon') + cls.p3 = UnindexedPoly3.create(partition=cls.p1.partition, data3='bacon') @classmethod def tearDownClass(cls): super(TestUnindexedPolymorphicQuery, cls).tearDownClass() management.drop_table(UnindexedPoly1) management.drop_table(UnindexedPoly2) + management.drop_table(UnindexedPoly3) def test_non_conflicting_type_results_work(self): - assert len([p for p in UnindexedPoly1.objects(partition=self.p1.partition, cluster=self.p1.cluster)]) == 1 - assert len([p for p in UnindexedPoly2.objects(partition=self.p1partition, cluster=self.p2.cluster)]) == 1 + p1, p2, p3 = self.p1, self.p2, self.p3 + assert len(list(UnindexedPoly1.objects(partition=p1.partition, cluster=p1.cluster))) == 1 + assert len(list(UnindexedPoly2.objects(partition=p1.partition, cluster=p2.cluster))) == 1 + + def test_subclassed_model_results_work_properly(self): + p1, p2, p3 = self.p1, self.p2, self.p3 + assert len(list(UnindexedPoly2.objects(partition=p1.partition, cluster__in=[p2.cluster, p3.cluster]))) == 2 def test_conflicting_type_results(self): with self.assertRaises(models.PolyMorphicModelException): - [p for p in UnindexedPoly1.objects(partition=self.p1.partition)] + list(UnindexedPoly1.objects(partition=self.p1.partition)) with self.assertRaises(models.PolyMorphicModelException): - [p for p in UnindexedPoly2.objects(partition=self.p1.partition)] - - def test_allow_filtering_filters_types(self): - pass + list(UnindexedPoly2.objects(partition=self.p1.partition)) class IndexedPolyBase(models.Model): @@ -175,6 +188,11 @@ class IndexedPoly2(IndexedPolyBase): data2 = columns.Text() +class IndexedPoly3(IndexedPoly2): + __polymorphic_key__ = 3 + data3 = columns.Text() + + class TestIndexedPolymorphicQuery(BaseCassEngTestCase): def test_success_case(self): From 8aa59edb36deb7f985dea556251cf66c9242be1b Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 3 Sep 2013 16:33:22 -0700 
Subject: [PATCH 0380/3726] switching select query to all columns when defer and only fields aren't defined --- cqlengine/query.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 9bb1a31a33..02a5d7d577 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -686,13 +686,16 @@ def _where_clause(self): def _get_select_statement(self): """ Returns the fields to be returned by the select query """ - fields = self.model._columns.keys() - if self._defer_fields: - fields = [f for f in fields if f not in self._defer_fields] - elif self._only_fields: - fields = self._only_fields - db_fields = [self.model._columns[f].db_field_name for f in fields] - return 'SELECT {}'.format(', '.join(['"{}"'.format(f) for f in db_fields])) + if self._defer_fields or self._only_fields: + fields = self.model._columns.keys() + if self._defer_fields: + fields = [f for f in fields if f not in self._defer_fields] + elif self._only_fields: + fields = self._only_fields + db_fields = [self.model._columns[f].db_field_name for f in fields] + return 'SELECT {}'.format(', '.join(['"{}"'.format(f) for f in db_fields])) + else: + return 'SELECT *' def _get_result_constructor(self, names): """ Returns a function that will be used to instantiate query results """ From 880600924ae33998b7dddc6b36de00618b089511 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 3 Sep 2013 16:53:59 -0700 Subject: [PATCH 0381/3726] adding support for auto filtering on polymorphic subclass models with and indexed polymorphic key --- cqlengine/models.py | 13 ++++++++++- cqlengine/tests/model/test_polymorphism.py | 26 +++++++++++++++------- 2 files changed, 30 insertions(+), 9 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 0d94e08cbf..e55ed5ae45 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -49,7 +49,18 @@ def __get__(self, obj, model): """ :rtype: ModelQuerySet """ if model.__abstract__: raise 
CQLEngineException('cannot execute queries against abstract models') - return model.__queryset__(model) + queryset = model.__queryset__(model) + + # if this is a concrete polymorphic model, and the polymorphic + # key is an indexed column, add a filter clause to only return + # logical rows of the proper type + if model._is_polymorphic and not model._is_polymorphic_base: + name, column = model._polymorphic_column_name, model._polymorphic_column + if column.partition_key or column.index: + # look for existing poly types + return queryset.filter(**{name: model.__polymorphic_key__}) + + return queryset def __call__(self, *args, **kwargs): """ diff --git a/cqlengine/tests/model/test_polymorphism.py b/cqlengine/tests/model/test_polymorphism.py index d24ac34044..d608aae1b7 100644 --- a/cqlengine/tests/model/test_polymorphism.py +++ b/cqlengine/tests/model/test_polymorphism.py @@ -148,7 +148,7 @@ def setUpClass(cls): cls.p1 = UnindexedPoly1.create(data1='pickle') cls.p2 = UnindexedPoly2.create(partition=cls.p1.partition, data2='bacon') - cls.p3 = UnindexedPoly3.create(partition=cls.p1.partition, data3='bacon') + cls.p3 = UnindexedPoly3.create(partition=cls.p1.partition, data3='turkey') @classmethod def tearDownClass(cls): @@ -175,6 +175,7 @@ def test_conflicting_type_results(self): class IndexedPolyBase(models.Model): partition = columns.UUID(primary_key=True, default=uuid.uuid4) + cluster = columns.UUID(primary_key=True, default=uuid.uuid4) row_type = columns.Integer(polymorphic_key=True, index=True) @@ -188,16 +189,25 @@ class IndexedPoly2(IndexedPolyBase): data2 = columns.Text() -class IndexedPoly3(IndexedPoly2): - __polymorphic_key__ = 3 - data3 = columns.Text() +class TestIndexedPolymorphicQuery(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestIndexedPolymorphicQuery, cls).setUpClass() + management.sync_table(IndexedPoly1) + management.sync_table(IndexedPoly2) + cls.p1 = IndexedPoly1.create(data1='pickle') + cls.p2 = 
IndexedPoly2.create(partition=cls.p1.partition, data2='bacon') -class TestIndexedPolymorphicQuery(BaseCassEngTestCase): + @classmethod + def tearDownClass(cls): + super(TestIndexedPolymorphicQuery, cls).tearDownClass() + management.drop_table(IndexedPoly1) + management.drop_table(IndexedPoly2) def test_success_case(self): - pass + assert len(list(IndexedPoly1.objects(partition=self.p1.partition))) == 1 + assert len(list(IndexedPoly2.objects(partition=self.p1.partition))) == 1 - def test_polymorphic_key_is_added_to_queries(self): - pass From f0b6966d41f16a608bfcd25c30d10cbab57919c2 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 3 Sep 2013 17:00:15 -0700 Subject: [PATCH 0382/3726] adding restriction around defining counter or container columns as polymorphic keys --- cqlengine/models.py | 3 +++ cqlengine/tests/model/test_polymorphism.py | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index e55ed5ae45..d01208ea09 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -436,6 +436,9 @@ def _transform_column(col_name, col_obj): polymorphic_column_name, polymorphic_column = polymorphic_columns[0] if polymorphic_columns else (None, None) + if isinstance(polymorphic_column, (columns.BaseContainerColumn, columns.Counter)): + raise ModelDefinitionException('counter and container columns cannot be used for polymorphic keys') + # find polymorphic base class polymorphic_base = None if is_polymorphic and not is_polymorphic_base: diff --git a/cqlengine/tests/model/test_polymorphism.py b/cqlengine/tests/model/test_polymorphism.py index d608aae1b7..00078bcb50 100644 --- a/cqlengine/tests/model/test_polymorphism.py +++ b/cqlengine/tests/model/test_polymorphism.py @@ -62,7 +62,10 @@ class M1(Base): assert Base.column_family_name() == M1.column_family_name() def test_collection_columns_cant_be_polymorphic_keys(self): - pass + with self.assertRaises(models.ModelDefinitionException): + class 
Base(models.Model): + partition = columns.Integer(primary_key=True) + type1 = columns.Set(columns.Integer, polymorphic_key=True) class PolyBase(models.Model): From cdb88e4fa4897769011c0181ef48c0ef6cdd4621 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 3 Sep 2013 18:32:06 -0700 Subject: [PATCH 0383/3726] adding custom validation --- cqlengine/models.py | 6 ++++ cqlengine/tests/model/test_validation.py | 46 ++++++++++++++++++++++++ 2 files changed, 52 insertions(+) diff --git a/cqlengine/models.py b/cqlengine/models.py index d01208ea09..202234af5e 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -331,6 +331,12 @@ def validate(self): """ Cleans and validates the field values """ for name, col in self._columns.items(): val = col.validate(getattr(self, name)) + + # handle custom validation + validation_function = getattr(self, 'validate_{}'.format(name), None) + if validation_function: + val = validation_function(val) + setattr(self, name, val) def _as_dict(self): diff --git a/cqlengine/tests/model/test_validation.py b/cqlengine/tests/model/test_validation.py index e69de29bb2..9a59872731 100644 --- a/cqlengine/tests/model/test_validation.py +++ b/cqlengine/tests/model/test_validation.py @@ -0,0 +1,46 @@ +from uuid import uuid4 + +from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.management import sync_table +from cqlengine.management import drop_table +from cqlengine.models import Model +from cqlengine import columns +from cqlengine import ValidationError + + +class CustomValidationTestModel(Model): + id = columns.UUID(primary_key=True, default=lambda: uuid4()) + count = columns.Integer() + text = columns.Text(required=False) + a_bool = columns.Boolean(default=False) + + def validate_a_bool(self, val): + if val: + raise ValidationError('False only!') + return val + + def validate_text(self, val): + if val.lower() == 'jon': + raise ValidationError('no one likes jon') + return val + + +class 
TestCustomValidation(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestCustomValidation, cls).setUpClass() + sync_table(CustomValidationTestModel) + + @classmethod + def tearDownClass(cls): + super(TestCustomValidation, cls).tearDownClass() + drop_table(CustomValidationTestModel) + + def test_custom_validation(self): + # sanity check + CustomValidationTestModel.create(count=5, text='txt', a_bool=False) + + with self.assertRaises(ValidationError): + CustomValidationTestModel.create(count=5, text='txt', a_bool=True) + From 3b3d00051816f567348a57e1d11cf8e0db15dd97 Mon Sep 17 00:00:00 2001 From: Tommaso Barbugli Date: Wed, 4 Sep 2013 15:23:51 +0200 Subject: [PATCH 0384/3726] get cassandra host from the environment (CASSANDRA_TEST_HOST) or default to localhost --- cqlengine/tests/base.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index 126049a860..98e34eaa7e 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -1,13 +1,20 @@ from unittest import TestCase from cqlengine import connection +import os + + +if os.environ.get('CASSANDRA_TEST_HOST'): + CASSANDRA_TEST_HOST = os.environ['CASSANDRA_TEST_HOST'] +else: + CASSANDRA_TEST_HOST = 'localhost:9160' + class BaseCassEngTestCase(TestCase): @classmethod def setUpClass(cls): super(BaseCassEngTestCase, cls).setUpClass() - # todo fix - connection.setup(['localhost:9160'], default_keyspace='cqlengine_test') + connection.setup([CASSANDRA_TEST_HOST], default_keyspace='cqlengine_test') def assertHasAttr(self, obj, attr): self.assertTrue(hasattr(obj, attr), From fb6543c973b8e445a96c110780fdecd0d8ebf7e1 Mon Sep 17 00:00:00 2001 From: Tommaso Barbugli Date: Wed, 4 Sep 2013 15:56:09 +0200 Subject: [PATCH 0385/3726] add VarInt column --- cqlengine/columns.py | 20 ++++++++++++++++ cqlengine/tests/columns/test_validation.py | 28 +++++++++++++++++++++- docs/topics/columns.rst | 6 +++++ 3 files changed, 53 
insertions(+), 1 deletion(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 9940f1aa18..be3e193110 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -246,6 +246,26 @@ def to_database(self, value): return self.validate(value) +class VarInt(Column): + db_type = 'varint' + + def validate(self, value): + val = super(VarInt, self).validate(value) + if val is None: + return + try: + return long(val) + except (TypeError, ValueError): + raise ValidationError( + "{} can't be converted to integral value".format(value)) + + def to_python(self, value): + return self.validate(value) + + def to_database(self, value): + return self.validate(value) + + class CounterValueManager(BaseValueManager): def __init__(self, instance, column, value): super(CounterValueManager, self).__init__(instance, column, value) diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 53cd738c02..e2393d5b34 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -14,6 +14,7 @@ from cqlengine.columns import Ascii from cqlengine.columns import Text from cqlengine.columns import Integer +from cqlengine.columns import VarInt from cqlengine.columns import DateTime from cqlengine.columns import Date from cqlengine.columns import UUID @@ -24,6 +25,9 @@ from cqlengine.management import create_table, delete_table from cqlengine.models import Model +import sys + + class TestDatetime(BaseCassEngTestCase): class DatetimeTest(Model): test_id = Integer(primary_key=True) @@ -64,6 +68,28 @@ def test_datetime_date_support(self): assert dt2.created_at.isoformat() == datetime(today.year, today.month, today.day).isoformat() +class TestVarInt(BaseCassEngTestCase): + class VarIntTest(Model): + test_id = Integer(primary_key=True) + bignum = VarInt(primary_key=True) + + @classmethod + def setUpClass(cls): + super(TestVarInt, cls).setUpClass() + create_table(cls.VarIntTest) + + @classmethod + def 
tearDownClass(cls): + super(TestVarInt, cls).tearDownClass() + delete_table(cls.VarIntTest) + + def test_varint_io(self): + long_int = sys.maxint + 1 + int1 = self.VarIntTest.objects.create(test_id=0, bignum=long_int) + int2 = self.VarIntTest.objects(test_id=0).first() + assert int1.bignum == int2.bignum + + class TestDate(BaseCassEngTestCase): class DateTest(Model): test_id = Integer(primary_key=True) @@ -109,7 +135,7 @@ def tearDownClass(cls): super(TestDecimal, cls).tearDownClass() delete_table(cls.DecimalTest) - def test_datetime_io(self): + def test_decimal_io(self): dt = self.DecimalTest.objects.create(test_id=0, dec_val=D('0.00')) dt2 = self.DecimalTest.objects(test_id=0).first() assert dt2.dec_val == dt.dec_val diff --git a/docs/topics/columns.rst b/docs/topics/columns.rst index 5ee9a18686..0418147f18 100644 --- a/docs/topics/columns.rst +++ b/docs/topics/columns.rst @@ -42,6 +42,12 @@ Columns columns.Integer() +.. class:: VarInt() + + Stores an arbitrary-precision integer :: + + columns.VarInt() + .. class:: DateTime() Stores a datetime value. 
From b35f915044fd697f2ad9a1f9acbd4aec520b0d66 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 4 Sep 2013 11:19:06 -0700 Subject: [PATCH 0386/3726] reverting custom validation changes --- cqlengine/models.py | 6 ---- cqlengine/tests/model/test_validation.py | 45 ------------------------ 2 files changed, 51 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 202234af5e..d01208ea09 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -331,12 +331,6 @@ def validate(self): """ Cleans and validates the field values """ for name, col in self._columns.items(): val = col.validate(getattr(self, name)) - - # handle custom validation - validation_function = getattr(self, 'validate_{}'.format(name), None) - if validation_function: - val = validation_function(val) - setattr(self, name, val) def _as_dict(self): diff --git a/cqlengine/tests/model/test_validation.py b/cqlengine/tests/model/test_validation.py index 9a59872731..8b13789179 100644 --- a/cqlengine/tests/model/test_validation.py +++ b/cqlengine/tests/model/test_validation.py @@ -1,46 +1 @@ -from uuid import uuid4 - -from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.management import sync_table -from cqlengine.management import drop_table -from cqlengine.models import Model -from cqlengine import columns -from cqlengine import ValidationError - - -class CustomValidationTestModel(Model): - id = columns.UUID(primary_key=True, default=lambda: uuid4()) - count = columns.Integer() - text = columns.Text(required=False) - a_bool = columns.Boolean(default=False) - - def validate_a_bool(self, val): - if val: - raise ValidationError('False only!') - return val - - def validate_text(self, val): - if val.lower() == 'jon': - raise ValidationError('no one likes jon') - return val - - -class TestCustomValidation(BaseCassEngTestCase): - - @classmethod - def setUpClass(cls): - super(TestCustomValidation, cls).setUpClass() - sync_table(CustomValidationTestModel) - - @classmethod - 
def tearDownClass(cls): - super(TestCustomValidation, cls).tearDownClass() - drop_table(CustomValidationTestModel) - - def test_custom_validation(self): - # sanity check - CustomValidationTestModel.create(count=5, text='txt', a_bool=False) - - with self.assertRaises(ValidationError): - CustomValidationTestModel.create(count=5, text='txt', a_bool=True) From 031ea5e8ad1540aed356f597dc72935c591f551b Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 4 Sep 2013 14:05:34 -0700 Subject: [PATCH 0387/3726] adding docs for polymorphic models and user defined validation --- docs/topics/models.rst | 86 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 961a866be9..183fa27b12 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -154,6 +154,92 @@ Model Attributes *Optional.* Sets the name of the keyspace used by this model. Defaulst to cqlengine +Table Polymorphism +================== + + As of cqlengine 0.8, it is possible to save and load different model classes using a single CQL table. + This is useful in situations where you have different object types that you want to store in a single cassandra row. + + For instance, suppose you want a table that stores rows of pets owned by an owner: + + .. code-block:: python + + class Pet(Model): + __table_name__ = 'pet' + owner_id = UUID(primary_key=True) + pet_id = UUID(primary_key=True) + pet_type = Text(polymorphic_key=True) + name = Text() + + def eat(self, food): + pass + + def sleep(self, time): + pass + + class Cat(Pet): + __polymorphic_key__ = 'cat' + cuteness = Float() + + def tear_up_couch(self): + pass + + class Dog(Pet): + __polymorphic_key__ = 'dog' + fierceness = Float() + + def bark_all_night(self): + pass + + After calling ``sync_table`` on each of these tables, the columns defined in each model will be added to the + ``pet`` table. 
Additionally, saving ``Cat`` and ``Dog`` models will save the meta data needed to identify each row + as either a cat or dog. + + To setup a polymorphic model structure, follow these steps + + 1. Create a base model with a column set as the polymorphic_key (set ``polymorphic_key=True`` in the column definition) + 2. Create subclass models, and define a unique ``__polymorphic_key__`` value on each + 3. Run ``sync_table`` on each of the sub tables + + **About the polymorphic key** + + The polymorphic key is what cqlengine uses under the covers to map logical cql rows to the appropriate model type. The + base model maintains a map of polymorphic keys to subclasses. When a polymorphic model is saved, this value is automatically + saved into the polymorphic key column. You can set the polymorphic key column to any column type that you like, with + the exception of container and counter columns, although ``Integer`` columns make the most sense. Additionally, if you + set ``index=True`` on your polymorphic key column, you can execute queries against polymorphic subclasses, and a + ``WHERE`` clause will be automatically added to your query, returning only rows of that type. Note that you must + define a unique ``__polymorphic_key__`` value to each subclass, and that you can only assign a single polymorphic + key column per model + + +Extending Model Validation +========================== + + Each time you save a model instance in cqlengine, the data in the model is validated against the schema you've defined + for your model. Most of the validation is fairly straightforward, it basically checks that you're not trying to do + something like save text into an integer column, and it enforces the ``required`` flag set on column definitions. + It also performs any transformations needed to save the data properly. 
+ + However, there are often additional constraints or transformations you want to impose on your data, beyond simply + making sure that Cassandra won't complain when you try to insert it. To define additional validation on a model, + extend the model's validation method: + + .. code-block:: python + + class Member(Model): + person_id = UUID(primary_key=True) + name = Text(required=True) + + def validate(self): + super(Member, self).validate() + if self.name == 'jon': + raise ValidationError('no jon\'s allowed') + + *Note*: while not required, the convention is to raise a ``ValidationError`` (``from cqlengine import ValidationError``) + if validation fails + + Compaction Options ==================== From 712814432ab013d753e7e68f534e71a8886e6386 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 4 Sep 2013 17:14:26 -0700 Subject: [PATCH 0388/3726] fixing typo in docs --- docs/topics/models.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 183fa27b12..06b720d4de 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -151,7 +151,7 @@ Model Attributes .. attribute:: Model.__keyspace__ - *Optional.* Sets the name of the keyspace used by this model. Defaulst to cqlengine + *Optional.* Sets the name of the keyspace used by this model. Defaults to cqlengine Table Polymorphism From 95246fc13421ab01758a13094878ed3339436346 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 4 Sep 2013 17:18:08 -0700 Subject: [PATCH 0389/3726] updating the doc index with download links --- docs/index.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/index.rst b/docs/index.rst index b97360ebe6..e4e8c394fd 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -13,7 +13,15 @@ cqlengine is a Cassandra CQL 3 Object Mapper for Python :ref:`getting-started` +Download +======== + +`Github `_ + +`PyPi `_ + Contents: +========= .. 
toctree:: :maxdepth: 2 From 41b9284c7129fad9f8d28c3f7cfbee2e01330f98 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 4 Sep 2013 21:04:34 -0700 Subject: [PATCH 0390/3726] savage version bump --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 39e898a4f9..aec258df73 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.7.1 +0.8 From e72cde5f0e7de393e2b9494062245e0eaf1f7392 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 23 Sep 2013 17:53:26 -0700 Subject: [PATCH 0391/3726] validation around not throwing an exception when updating a keyspace --- cqlengine/models.py | 7 +++---- cqlengine/tests/columns/test_validation.py | 13 ++++++++++++- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index d01208ea09..2969c228aa 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -184,10 +184,6 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass def __init__(self, **values): self._values = {} - extra_columns = set(values.keys()) - set(self._columns.keys()) - if extra_columns: - raise ValidationError("Incorrect columns passed: {}".format(extra_columns)) - for name, column in self._columns.items(): value = values.get(name, None) if value is not None or isinstance(column, columns.BaseContainerColumn): @@ -342,6 +338,9 @@ def _as_dict(self): @classmethod def create(cls, **kwargs): + extra_columns = set(kwargs.keys()) - set(cls._columns.keys()) + if extra_columns: + raise ValidationError("Incorrect columns passed: {}".format(extra_columns)) return cls.objects.create(**kwargs) @classmethod diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index e2393d5b34..6362375ed4 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -6,6 +6,7 @@ from unittest import TestCase from uuid import uuid4, uuid1 from 
cqlengine import ValidationError +from cqlengine.connection import execute from cqlengine.tests.base import BaseCassEngTestCase @@ -22,7 +23,7 @@ from cqlengine.columns import Float from cqlengine.columns import Decimal -from cqlengine.management import create_table, delete_table +from cqlengine.management import create_table, delete_table, sync_table from cqlengine.models import Model import sys @@ -234,6 +235,16 @@ def test_extra_field(self): with self.assertRaises(ValidationError): self.TestModel.create(bacon=5000) +class TestPythonDoesntDieWhenExtraFieldIsInCassandra(BaseCassEngTestCase): + class TestModel(Model): + __table_name__ = 'alter_doesnt_break_running_app' + id = UUID(primary_key=True, default=uuid4) + + def test_extra_field(self): + sync_table(self.TestModel) + self.TestModel.create() + execute("ALTER TABLE {} add blah int".format(self.TestModel.column_family_name(include_keyspace=True))) + self.TestModel.objects().all() class TestTimeUUIDFromDatetime(TestCase): def test_conversion_specific_date(self): From 3bd1ee58551d4faf21a69a7a82b56f41c94c9c9a Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 23 Sep 2013 17:57:39 -0700 Subject: [PATCH 0392/3726] removed python 2.6, fixed issue with model creation --- cqlengine/VERSION | 2 +- cqlengine/tests/columns/test_validation.py | 3 ++- setup.py | 1 - 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 39e898a4f9..100435be13 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.7.1 +0.8.2 diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 6362375ed4..3c6c665614 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -23,7 +23,7 @@ from cqlengine.columns import Float from cqlengine.columns import Decimal -from cqlengine.management import create_table, delete_table, sync_table +from cqlengine.management import create_table, 
delete_table, sync_table, drop_table from cqlengine.models import Model import sys @@ -241,6 +241,7 @@ class TestModel(Model): id = UUID(primary_key=True, default=uuid4) def test_extra_field(self): + drop_table(self.TestModel) sync_table(self.TestModel) self.TestModel.create() execute("ALTER TABLE {} add blah int".format(self.TestModel.column_family_name(include_keyspace=True))) diff --git a/setup.py b/setup.py index 944d41191f..3bb09c9c2c 100644 --- a/setup.py +++ b/setup.py @@ -28,7 +28,6 @@ "Environment :: Plugins", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", - "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Topic :: Internet :: WWW/HTTP", "Topic :: Software Development :: Libraries :: Python Modules", From 96d4b45effcdf6cda5bfe7778f1347a384b5a260 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 23 Sep 2013 17:57:53 -0700 Subject: [PATCH 0393/3726] version bump --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 100435be13..6f4eebdf6f 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.8.2 +0.8.1 From 5c16b58a48c493322d515a98b774efb128a50320 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 24 Sep 2013 13:30:56 -0700 Subject: [PATCH 0394/3726] allow attempt to requery on connection failure --- cqlengine/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index fabf197a31..dc0d73a5a4 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -168,7 +168,7 @@ def execute(self, query, params): except cql.ProgrammingError as ex: raise CQLEngineException(unicode(ex)) except TTransportException: - raise CQLEngineException("Could not execute query against the cluster") + pass def execute(query, params=None): From 8c9487864f43df9d54b95cb22cf1ee8905271467 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 24 
Sep 2013 13:32:53 -0700 Subject: [PATCH 0395/3726] version bumb --- changelog | 10 ++++++++++ cqlengine/VERSION | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/changelog b/changelog index 8dd2fe8608..75814f1386 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,15 @@ CHANGELOG +0.8.2 +* fix for connection failover + +0.8.1 +* fix for models not exactly matching schema + +0.8.0 +* support for table polymorphism +* var int type + 0.7.1 * refactoring query class to make defining custom model instantiation logic easier diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 6f4eebdf6f..100435be13 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.8.1 +0.8.2 From 639fef10cf659c573892aa00a022dfe7edb5d3f6 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 25 Sep 2013 13:35:26 -0700 Subject: [PATCH 0396/3726] fixing documentation inconsistency --- docs/topics/columns.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/topics/columns.rst b/docs/topics/columns.rst index 0418147f18..6d04df6452 100644 --- a/docs/topics/columns.rst +++ b/docs/topics/columns.rst @@ -197,7 +197,7 @@ Column Options .. attribute:: BaseColumn.required - If True, this model cannot be saved without a value defined for this column. Defaults to True. Primary key fields cannot have their required fields set to False. + If True, this model cannot be saved without a value defined for this column. Defaults to False. Primary key fields cannot have their required fields set to False. .. 
attribute:: BaseColumn.clustering_order From 523d99364a47e1a396e99dc3ed856ec0ec4d0669 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 27 Sep 2013 13:35:19 -0700 Subject: [PATCH 0397/3726] better logging on operational errors --- cqlengine/connection.py | 6 ++++++ .../tests/connections/test_connection_pool.py | 21 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index dc0d73a5a4..0092e1f538 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -12,6 +12,8 @@ from copy import copy from cqlengine.exceptions import CQLEngineException +from cql import OperationalError + from contextlib import contextmanager from thrift.transport.TTransport import TTransportException @@ -28,6 +30,7 @@ class CQLConnectionError(CQLEngineException): pass connection_pool = None + class CQLConnectionError(CQLEngineException): pass @@ -169,6 +172,9 @@ def execute(self, query, params): raise CQLEngineException(unicode(ex)) except TTransportException: pass + except OperationalError as ex: + LOG.exception("Operational Error %s on %s:%s", ex, con.host, con.port) + raise ex def execute(query, params=None): diff --git a/cqlengine/tests/connections/test_connection_pool.py b/cqlengine/tests/connections/test_connection_pool.py index e69de29bb2..29b3ea50a9 100644 --- a/cqlengine/tests/connections/test_connection_pool.py +++ b/cqlengine/tests/connections/test_connection_pool.py @@ -0,0 +1,21 @@ +from unittest import TestCase +from cql import OperationalError +from mock import MagicMock, patch, Mock + +from cqlengine.connection import ConnectionPool, Host + + +class OperationalErrorLoggingTest(TestCase): + def test_logging(self): + p = ConnectionPool([Host('127.0.0.1', '9160')]) + + class MockConnection(object): + host = 'localhost' + port = 6379 + def cursor(self): + raise OperationalError('test') + + + with patch.object(p, 'get', return_value=MockConnection()): + with self.assertRaises(OperationalError): + 
p.execute("select * from system.peers", {}) From f6e37e96ce90a35a7a38124c7a451bab3c4b3c9b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 27 Sep 2013 13:36:00 -0700 Subject: [PATCH 0398/3726] version bump --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 100435be13..ee94dd834b 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.8.2 +0.8.3 From b8f9b336e8fba3e1404f59aa5d9c784f151db75a Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 27 Sep 2013 13:36:19 -0700 Subject: [PATCH 0399/3726] updated changelog --- changelog | 3 +++ 1 file changed, 3 insertions(+) diff --git a/changelog b/changelog index 75814f1386..9c8be6538e 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,8 @@ CHANGELOG +0.8.3 +* better logging for operational errors + 0.8.2 * fix for connection failover From 5003e608c6593e98b82479ae9db876fa0ef0eccc Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 2 Oct 2013 13:40:36 -0700 Subject: [PATCH 0400/3726] updating value manager to use deepcopy instead of copy --- changelog | 3 +++ cqlengine/VERSION | 2 +- cqlengine/columns.py | 8 +++++--- cqlengine/tests/columns/test_container_columns.py | 2 -- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/changelog b/changelog index 9c8be6538e..ac3d1a6486 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,8 @@ CHANGELOG +0.8.4 +* changing value manager previous value copying to deepcopy + 0.8.3 * better logging for operational errors diff --git a/cqlengine/VERSION b/cqlengine/VERSION index ee94dd834b..b60d71966a 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.8.3 +0.8.4 diff --git a/cqlengine/columns.py b/cqlengine/columns.py index e40ddc28c0..10a5e1b956 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -1,5 +1,5 @@ #column field types -from copy import copy +from copy import deepcopy from datetime import datetime from datetime import date import re @@ -8,12 
+8,13 @@ from cqlengine.exceptions import ValidationError + class BaseValueManager(object): def __init__(self, instance, column, value): self.instance = instance self.column = column - self.previous_value = copy(value) + self.previous_value = deepcopy(value) self.value = value @property @@ -31,7 +32,7 @@ def changed(self): return self.value != self.previous_value def reset_previous_value(self): - self.previous_value = copy(self.value) + self.previous_value = deepcopy(self.value) def getval(self): return self.value @@ -52,6 +53,7 @@ def get_property(self): else: return property(_get, _set) + class ValueQuoter(object): """ contains a single value, which will quote itself for CQL insertion statements diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index cd11fb5abb..ba4df808c8 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -145,8 +145,6 @@ def test_instantiation_with_column_instance(self): column = columns.Set(columns.Text(min_length=100)) assert isinstance(column.value_col, columns.Text) - - def test_to_python(self): """ Tests that to_python of value column is called """ column = columns.Set(JsonTestColumn) From 10e4d4290405083979ff7d1ce272ed97efb6f759 Mon Sep 17 00:00:00 2001 From: Eric Scrivner Date: Thu, 3 Oct 2013 16:22:34 -0700 Subject: [PATCH 0401/3726] Transport Creation --- cqlengine/connection.py | 57 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 53 insertions(+), 4 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 0092e1f538..f6e25e4e76 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -44,11 +44,32 @@ def _column_tuple_factory(colnames, values): return tuple(colnames), [RowResult(v) for v in values] -def setup(hosts, username=None, password=None, max_connections=10, default_keyspace=None, consistency='ONE'): +def setup( + hosts, + username=None, + 
password=None, + max_connections=10, + default_keyspace=None, + consistency='ONE', + timeout=None): """ Records the hosts and connects to one of them :param hosts: list of hosts, strings in the :, or just + :type hosts: list + :param username: The cassandra username + :type username: str + :param password: The cassandra password + :type password: str + :param max_connections: The maximum number of connections to service + :type max_connections: int or long + :param default_keyspace: The default keyspace to use + :type default_keyspace: str + :param consistency: The global consistency level + :type consistency: str + :param timeout: The connection timeout in milliseconds + :type timeout: int or long + """ global _max_connections global connection_pool @@ -72,17 +93,24 @@ def setup(hosts, username=None, password=None, max_connections=10, default_keysp if not _hosts: raise CQLConnectionError("At least one host required") - connection_pool = ConnectionPool(_hosts, username, password, consistency) + connection_pool = ConnectionPool(_hosts, username, password, consistency, timeout) class ConnectionPool(object): """Handles pooling of database connections.""" - def __init__(self, hosts, username=None, password=None, consistency=None): + def __init__( + self, + hosts, + username=None, + password=None, + consistency=None, + timeout=None): self._hosts = hosts self._username = username self._password = password self._consistency = consistency + self._timeout = timeout self._queue = Queue.Queue(maxsize=_max_connections) @@ -124,6 +152,25 @@ def put(self, conn): else: self._queue.put(conn) + def _create_transport(self, host): + """ + Create a new Thrift transport for the given host. 
+ + :param host: The host object + :type host: Host + + :rtype: thrift.TTransport.* + + """ + from thrift.transport import TSocket, TTransport + + thrift_socket = TSocket.TSocket(host.name, host.port) + + if self._timeout is not None: + thrift_socket.setTimeout(self._timeout) + + return TTransport.TFramedTransport(thrift_socket) + def _create_connection(self): """ Creates a new connection for the connection pool. @@ -138,12 +185,14 @@ def _create_connection(self): for host in hosts: try: + transport = self._create_transport(host) new_conn = cql.connect( host.name, host.port, user=self._username, password=self._password, - consistency_level=self._consistency + consistency_level=self._consistency, + transport=transport ) new_conn.set_cql_version('3.0.0') return new_conn From f4bf649709f8daefc153e91d9ffba31e4f0f4777 Mon Sep 17 00:00:00 2001 From: Eric Scrivner Date: Thu, 3 Oct 2013 16:29:31 -0700 Subject: [PATCH 0402/3726] Update documentation --- cqlengine/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index f6e25e4e76..ecd3edd82b 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -67,7 +67,7 @@ def setup( :type default_keyspace: str :param consistency: The global consistency level :type consistency: str - :param timeout: The connection timeout in milliseconds + :param timeout: The connection timeout in seconds :type timeout: int or long """ From f23d310767a6c506574cc124b9cbfc5312c6887d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 3 Oct 2013 16:46:23 -0700 Subject: [PATCH 0403/3726] updating version number and docs --- changelog | 3 +++ cqlengine/VERSION | 2 +- docs/topics/connection.rst | 3 +++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/changelog b/changelog index ac3d1a6486..21d31aa7c8 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,8 @@ CHANGELOG +0.8.5 +* adding support for timeouts + 0.8.4 * changing value manager previous value 
copying to deepcopy diff --git a/cqlengine/VERSION b/cqlengine/VERSION index b60d71966a..7ada0d303f 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.8.4 +0.8.5 diff --git a/docs/topics/connection.rst b/docs/topics/connection.rst index 88f601fbd0..c956b58fda 100644 --- a/docs/topics/connection.rst +++ b/docs/topics/connection.rst @@ -25,6 +25,9 @@ If there is a problem with one of the servers, cqlengine will try to connect to :param consistency: the consistency level of the connection, defaults to 'ONE' :type consistency: str + :param timeout: the connection timeout in seconds + :type timeout: int or long + Records the hosts and connects to one of them See the example at :ref:`getting-started` From d568185c3aa7ad4593d37f638a24cf1759ede6bd Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 3 Oct 2013 16:49:38 -0700 Subject: [PATCH 0404/3726] updating timeout docs --- cqlengine/connection.py | 2 +- docs/topics/connection.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index ecd3edd82b..f6e25e4e76 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -67,7 +67,7 @@ def setup( :type default_keyspace: str :param consistency: The global consistency level :type consistency: str - :param timeout: The connection timeout in seconds + :param timeout: The connection timeout in milliseconds :type timeout: int or long """ diff --git a/docs/topics/connection.rst b/docs/topics/connection.rst index c956b58fda..6140584704 100644 --- a/docs/topics/connection.rst +++ b/docs/topics/connection.rst @@ -25,7 +25,7 @@ If there is a problem with one of the servers, cqlengine will try to connect to :param consistency: the consistency level of the connection, defaults to 'ONE' :type consistency: str - :param timeout: the connection timeout in seconds + :param timeout: the connection timeout in milliseconds :type timeout: int or long Records the hosts and connects to one of 
them From 2b8424c74895c6a8dbd3d7f2cc789dd1027d6e89 Mon Sep 17 00:00:00 2001 From: Pandu Rao Date: Thu, 3 Oct 2013 18:34:32 -0700 Subject: [PATCH 0405/3726] Added guards to handle None value in Date and DateTime column to_python conversion --- cqlengine/columns.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 10a5e1b956..a361eb0d43 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -313,6 +313,7 @@ class DateTime(Column): db_type = 'timestamp' def to_python(self, value): + if value is None: return if isinstance(value, datetime): return value elif isinstance(value, date): @@ -340,6 +341,7 @@ class Date(Column): def to_python(self, value): + if value is None: return if isinstance(value, datetime): return value.date() elif isinstance(value, date): From 2a7a11dde4484bf4f0a77152e3ad1615f1235eaf Mon Sep 17 00:00:00 2001 From: Pandu Rao Date: Wed, 16 Oct 2013 18:30:47 -0700 Subject: [PATCH 0406/3726] Added guard to handle None value in Date column to_database conversion --- cqlengine/columns.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index a361eb0d43..d82dd636fc 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -351,6 +351,7 @@ def to_python(self, value): def to_database(self, value): value = super(Date, self).to_database(value) + if value is None: return if isinstance(value, datetime): value = value.date() if not isinstance(value, date): From c9358733f60353f9ef583a166875ccd608792119 Mon Sep 17 00:00:00 2001 From: Pandu Rao Date: Wed, 16 Oct 2013 18:32:05 -0700 Subject: [PATCH 0407/3726] Add testcases for guards for None in date, datetime conversion --- cqlengine/tests/columns/test_validation.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 3c6c665614..158bda4a52 100644 --- a/cqlengine/tests/columns/test_validation.py +++ 
b/cqlengine/tests/columns/test_validation.py @@ -68,6 +68,11 @@ def test_datetime_date_support(self): dt2 = self.DatetimeTest.objects(test_id=0).first() assert dt2.created_at.isoformat() == datetime(today.year, today.month, today.day).isoformat() + def test_datetime_none(self): + dt = self.DatetimeTest.objects.create(test_id=0, created_at=None) + dt2 = self.DatetimeTest.objects(test_id=0).first() + assert dt2.created_at is None + class TestVarInt(BaseCassEngTestCase): class VarIntTest(Model): @@ -120,6 +125,11 @@ def test_date_io_using_datetime(self): assert isinstance(dt2.created_at, date) assert dt2.created_at.isoformat() == now.date().isoformat() + def test_date_none(self): + dt = self.DateTest.objects.create(test_id=0, created_at=None) + dt2 = self.DateTest.objects(test_id=0).first() + assert dt2.created_at is None + class TestDecimal(BaseCassEngTestCase): class DecimalTest(Model): From b52e9a9b34411b67d264d4c0a6d4d2c3f2ef76d9 Mon Sep 17 00:00:00 2001 From: Michael Cyrulnik Date: Thu, 17 Oct 2013 11:17:22 -0400 Subject: [PATCH 0408/3726] add BigInt column --- cqlengine/columns.py | 4 ++++ cqlengine/tests/columns/test_validation.py | 11 +++++++++++ cqlengine/tests/columns/test_value_io.py | 6 ++++++ docs/topics/columns.rst | 8 +++++++- 4 files changed, 28 insertions(+), 1 deletion(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 10a5e1b956..8781e62fc8 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -252,6 +252,10 @@ def to_database(self, value): return self.validate(value) +class BigInt(Integer): + db_type = 'bigint' + + class VarInt(Column): db_type = 'varint' diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 3c6c665614..b89e4a9172 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -15,6 +15,7 @@ from cqlengine.columns import Ascii from cqlengine.columns import Text from cqlengine.columns import Integer +from 
cqlengine.columns import BigInt from cqlengine.columns import VarInt from cqlengine.columns import DateTime from cqlengine.columns import Date @@ -180,6 +181,16 @@ def test_default_zero_fields_validate(self): it = self.IntegerTest() it.validate() +class TestBigInt(BaseCassEngTestCase): + class BigIntTest(Model): + test_id = UUID(primary_key=True, default=lambda:uuid4()) + value = BigInt(default=0, required=True) + + def test_default_zero_fields_validate(self): + """ Tests that bigint columns with a default value of 0 validate """ + it = self.BigIntTest() + it.validate() + class TestText(BaseCassEngTestCase): def test_min_length(self): diff --git a/cqlengine/tests/columns/test_value_io.py b/cqlengine/tests/columns/test_value_io.py index 1c822a9e5d..54d8f5d5c5 100644 --- a/cqlengine/tests/columns/test_value_io.py +++ b/cqlengine/tests/columns/test_value_io.py @@ -95,6 +95,12 @@ class TestInteger(BaseColumnIOTest): pkey_val = 5 data_val = 6 +class TestBigInt(BaseColumnIOTest): + + column = columns.BigInt + pkey_val = 6 + data_val = pow(2, 63) - 1 + class TestDateTime(BaseColumnIOTest): column = columns.DateTime diff --git a/docs/topics/columns.rst b/docs/topics/columns.rst index 6d04df6452..0f71e3c7ad 100644 --- a/docs/topics/columns.rst +++ b/docs/topics/columns.rst @@ -38,10 +38,16 @@ Columns .. class:: Integer() - Stores an integer value :: + Stores a 32-bit signed integer value :: columns.Integer() +.. class:: BigInt() + + Stores a 64-bit signed long value :: + + columns.BigInt() + .. 
class:: VarInt() Stores an arbitrary-precision integer :: From 6abf31f2cc0a9437807e7a6f4b585aa8f3a4f425 Mon Sep 17 00:00:00 2001 From: Pandu Rao Date: Thu, 17 Oct 2013 12:22:33 -0700 Subject: [PATCH 0409/3726] Fixed bug in test case and expanded test case to include values_list --- cqlengine/tests/columns/test_validation.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 158bda4a52..1a72046b2a 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -69,10 +69,13 @@ def test_datetime_date_support(self): assert dt2.created_at.isoformat() == datetime(today.year, today.month, today.day).isoformat() def test_datetime_none(self): - dt = self.DatetimeTest.objects.create(test_id=0, created_at=None) - dt2 = self.DatetimeTest.objects(test_id=0).first() + dt = self.DatetimeTest.objects.create(test_id=1, created_at=None) + dt2 = self.DatetimeTest.objects(test_id=1).first() assert dt2.created_at is None + dts = self.DatetimeTest.objects.filter(test_id=1).values_list('created_at') + assert dts[0][0] is None + class TestVarInt(BaseCassEngTestCase): class VarIntTest(Model): @@ -126,10 +129,13 @@ def test_date_io_using_datetime(self): assert dt2.created_at.isoformat() == now.date().isoformat() def test_date_none(self): - dt = self.DateTest.objects.create(test_id=0, created_at=None) - dt2 = self.DateTest.objects(test_id=0).first() + self.DateTest.objects.create(test_id=1, created_at=None) + dt2 = self.DateTest.objects(test_id=1).first() assert dt2.created_at is None + dts = self.DateTest.objects(test_id=1).values_list('created_at') + assert dts[0][0] is None + class TestDecimal(BaseCassEngTestCase): class DecimalTest(Model): From 442872f6a861bcc0686c21bb813f04e23ada6fbb Mon Sep 17 00:00:00 2001 From: Kai Lautaportti Date: Fri, 18 Oct 2013 15:40:12 +0100 Subject: [PATCH 0410/3726] Allow use of timezone 
aware datetimes in {Max,Min}TimeUUID Convert the given timezone aware datetimes to UTC in a similar manner as the DateTime column type does instead of resulting in an error. --- cqlengine/columns.py | 15 ++----- cqlengine/functions.py | 12 +++-- cqlengine/tests/query/test_queryset.py | 62 ++++++++++++++++++++++++++ 3 files changed, 74 insertions(+), 15 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 10a5e1b956..dd7b3ba325 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -328,11 +328,9 @@ def to_database(self, value): else: raise ValidationError("'{}' is not a datetime object".format(value)) epoch = datetime(1970, 1, 1, tzinfo=value.tzinfo) - offset = 0 - if epoch.tzinfo: - offset_delta = epoch.tzinfo.utcoffset(epoch) - offset = offset_delta.days*24*3600 + offset_delta.seconds - return long(((value - epoch).total_seconds() - offset) * 1000) + offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 + + return long(((value - epoch).total_seconds() - offset) * 1000) class Date(Column): @@ -402,12 +400,7 @@ def from_datetime(self, dt): global _last_timestamp epoch = datetime(1970, 1, 1, tzinfo=dt.tzinfo) - - offset = 0 - if epoch.tzinfo: - offset_delta = epoch.tzinfo.utcoffset(epoch) - offset = offset_delta.days*24*3600 + offset_delta.seconds - + offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 timestamp = (dt - epoch).total_seconds() - offset node = None diff --git a/cqlengine/functions.py b/cqlengine/functions.py index eee1fcab5e..136618453c 100644 --- a/cqlengine/functions.py +++ b/cqlengine/functions.py @@ -54,8 +54,10 @@ def __init__(self, value): super(MinTimeUUID, self).__init__(value) def get_value(self): - epoch = datetime(1970, 1, 1) - return long((self.value - epoch).total_seconds() * 1000) + epoch = datetime(1970, 1, 1, tzinfo=self.value.tzinfo) + offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 + + return long(((self.value - 
epoch).total_seconds() - offset) * 1000) def get_dict(self, column): return {self.identifier: self.get_value()} @@ -79,8 +81,10 @@ def __init__(self, value): super(MaxTimeUUID, self).__init__(value) def get_value(self): - epoch = datetime(1970, 1, 1) - return long((self.value - epoch).total_seconds() * 1000) + epoch = datetime(1970, 1, 1, tzinfo=self.value.tzinfo) + offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 + + return long(((self.value - epoch).total_seconds() - offset) * 1000) def get_dict(self, column): return {self.identifier: self.get_value()} diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 117c87c18b..9004a28bf1 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -11,6 +11,25 @@ from cqlengine.models import Model from cqlengine import columns from cqlengine import query +from datetime import timedelta +from datetime import tzinfo + + +class TzOffset(tzinfo): + """Minimal implementation of a timezone offset to help testing with timezone + aware datetimes. + """ + def __init__(self, offset): + self._offset = timedelta(hours=offset) + + def utcoffset(self, dt): + return self._offset + + def tzname(self, dt): + return 'TzOffset: {}'.format(self._offset.hours) + + def dst(self, dt): + return timedelta(0) class TestModel(Model): test_id = columns.Integer(primary_key=True) @@ -515,6 +534,49 @@ def tearDownClass(cls): super(TestMinMaxTimeUUIDFunctions, cls).tearDownClass() delete_table(TimeUUIDQueryModel) + def test_tzaware_datetime_support(self): + """Test that using timezone aware datetime instances works with the + MinTimeUUID/MaxTimeUUID functions. 
+ """ + pk = uuid4() + midpoint_utc = datetime.utcnow().replace(tzinfo=TzOffset(0)) + midpoint_helsinki = midpoint_utc.astimezone(TzOffset(3)) + + # Assert pre-condition that we have the same logical point in time + assert midpoint_utc.utctimetuple() == midpoint_helsinki.utctimetuple() + assert midpoint_utc.timetuple() != midpoint_helsinki.timetuple() + + TimeUUIDQueryModel.create( + partition=pk, + time=columns.TimeUUID.from_datetime(midpoint_utc - timedelta(minutes=1)), + data='1') + + TimeUUIDQueryModel.create( + partition=pk, + time=columns.TimeUUID.from_datetime(midpoint_utc), + data='2') + + TimeUUIDQueryModel.create( + partition=pk, + time=columns.TimeUUID.from_datetime(midpoint_utc + timedelta(minutes=1)), + data='3') + + assert ['1', '2'] == [o.data for o in TimeUUIDQueryModel.filter( + TimeUUIDQueryModel.partition == pk, + TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint_utc))] + + assert ['1', '2'] == [o.data for o in TimeUUIDQueryModel.filter( + TimeUUIDQueryModel.partition == pk, + TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint_helsinki))] + + assert ['2', '3'] == [o.data for o in TimeUUIDQueryModel.filter( + TimeUUIDQueryModel.partition == pk, + TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint_utc))] + + assert ['2', '3'] == [o.data for o in TimeUUIDQueryModel.filter( + TimeUUIDQueryModel.partition == pk, + TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint_helsinki))] + def test_success_case(self): """ Test that the min and max time uuid functions work as expected """ pk = uuid4() From 72959da7db8a1fd479fbe8d1bea406ca49f1c8f1 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 23 Oct 2013 16:49:58 -0700 Subject: [PATCH 0411/3726] fixed authors --- AUTHORS | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/AUTHORS b/AUTHORS index b361bf0de4..a220cb26d6 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,9 +1,10 @@ PRIMARY AUTHORS Blake Eggleston +Jon Haddad CONTRIBUTORS Eric Scrivner - test environment, 
connection pooling -Jon Haddad - helped hash out some of the architecture + From 284a69d7f27ade34074a98f582cf92a8088d7d9c Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 11:55:15 -0700 Subject: [PATCH 0412/3726] adding stubbed ttl query tests --- cqlengine/tests/test_ttl.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 cqlengine/tests/test_ttl.py diff --git a/cqlengine/tests/test_ttl.py b/cqlengine/tests/test_ttl.py new file mode 100644 index 0000000000..2274cf34c5 --- /dev/null +++ b/cqlengine/tests/test_ttl.py @@ -0,0 +1,16 @@ +from cqlengine.tests.base import BaseCassEngTestCase + + +class TTLQueryTests(BaseCassEngTestCase): + + def test_update_queryset_ttl_success_case(self): + """ tests that ttls on querysets work as expected """ + + def test_select_ttl_failure(self): + """ tests that ttls on select queries raise an exception """ + + +class TTLModelTests(BaseCassEngTestCase): + + def test_model_ttl_success_case(self): + """ tests that ttls on models work as expected """ \ No newline at end of file From 7b34285ec296b298d674943899415c4a9dad3b11 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 11:56:20 -0700 Subject: [PATCH 0413/3726] adding batch and ttl to docs --- docs/topics/models.rst | 8 ++++++++ docs/topics/queryset.rst | 9 +++++++++ 2 files changed, 17 insertions(+) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 06b720d4de..e63c5dc791 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -138,6 +138,14 @@ Model Methods Deletes the object from the database. + .. method:: batch(batch_object) + + Sets the batch object to run instance updates and inserts queries with. + + .. method:: ttl(ttl_value) + + Sets the ttl value to run instance updates and inserts queries with.
+ Model Attributes ================ diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index cb1fdbb836..08c46a2796 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -353,3 +353,12 @@ QuerySet method reference .. method:: allow_filtering() Enables the (usually) unwise practive of querying on a clustering key without also defining a partition key + + .. method:: batch(batch_object) + + Sets the batch object to run the query on. Note that running a select query with a batch object will raise an exception + + .. method:: ttl(ttl_value) + + Sets the ttl to run the query with. Note that running a select query with a ttl value will raise an exception + From c62b7519fd613042c1a46041926c176bedd52f6b Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 12:17:17 -0700 Subject: [PATCH 0414/3726] adding failing tests for empty container column saving --- .../tests/columns/test_container_columns.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index ba4df808c8..2e1d7cb844 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -154,6 +154,10 @@ def test_to_python(self): py_val = column.to_python(db_val.value) assert py_val == val + def test_default_empty_container_saving(self): + """ tests that the default empty container is not saved if it hasn't been updated """ + self.fail("implement") + class TestListModel(Model): partition = columns.UUID(primary_key=True, default=uuid4) @@ -282,6 +286,11 @@ def test_to_python(self): py_val = column.to_python(db_val.value) assert py_val == val + def test_default_empty_container_saving(self): + """ tests that the default empty container is not saved if it hasn't been updated """ + self.fail("implement") + + class TestMapModel(Model): partition = columns.UUID(primary_key=True,
default=uuid4) int_map = columns.Map(columns.Integer, columns.UUID, required=False) @@ -309,8 +318,6 @@ def test_empty_retrieve(self): tmp2 = TestMapModel.get(partition=tmp.partition) tmp2.int_map['blah'] = 1 - - def test_io_success(self): """ Tests that a basic usage works as expected """ k1 = uuid4() @@ -370,7 +377,6 @@ def test_updates_from_none(self): m2 = TestMapModel.get(partition=m.partition) assert m2.int_map == expected - def test_updates_to_none(self): """ Tests that setting the field to None works as expected """ m = TestMapModel.create(int_map={1: uuid4()}) @@ -406,6 +412,10 @@ def test_to_python(self): py_val = column.to_python(db_val.value) assert py_val == val + def test_default_empty_container_saving(self): + """ tests that the default empty container is not saved if it hasn't been updated """ + self.fail("implement") + # def test_partial_update_creation(self): # """ # Tests that proper update statements are created for a partial list update From 52e29a57597d9b5bcb1f0ccdd8de3ad0fd589308 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 12:17:52 -0700 Subject: [PATCH 0415/3726] adding update method to model docs --- docs/topics/models.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 06b720d4de..e665e6c5ca 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -138,6 +138,13 @@ Model Methods Deletes the object from the database. + .. method:: update(**values) + + Performs an update on the model instance. You can pass in values to set on the model + for updating, or you can call without values to execute an update against any modified + fields. If no fields on the model have been modified since loading, no query will be + performed. Model validation is performed normally.
+ Model Attributes ================ From a57d3730f3ea5180a130c56141f308a28c919492 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 13:08:52 -0700 Subject: [PATCH 0416/3726] separating inserts, updates, column deletes and query value logic --- cqlengine/query.py | 197 +++++++++++++++++++++++++++++---------------- 1 file changed, 129 insertions(+), 68 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 02a5d7d577..00e9db0c83 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -22,6 +22,7 @@ class MultipleObjectsReturned(QueryException): pass class QueryOperatorException(QueryException): pass + class QueryOperator(object): # The symbol that identifies this operator in filter kwargs # ie: colname__ @@ -116,10 +117,12 @@ def __ne__(self, op): def __hash__(self): return hash(self.column.db_field_name) ^ hash(self.value) + class EqualsOperator(QueryOperator): symbol = 'EQ' cql_symbol = '=' + class IterableQueryValue(QueryValue): def __init__(self, value): try: @@ -133,28 +136,34 @@ def get_dict(self, column): def get_cql(self): return '({})'.format(', '.join(':{}'.format(i) for i in self.identifier)) + class InOperator(EqualsOperator): symbol = 'IN' cql_symbol = 'IN' QUERY_VALUE_WRAPPER = IterableQueryValue + class GreaterThanOperator(QueryOperator): symbol = "GT" cql_symbol = '>' + class GreaterThanOrEqualOperator(QueryOperator): symbol = "GTE" cql_symbol = '>=' + class LessThanOperator(QueryOperator): symbol = "LT" cql_symbol = '<' + class LessThanOrEqualOperator(QueryOperator): symbol = "LTE" cql_symbol = '<=' + class AbstractQueryableColumn(object): """ exposes cql query operators through pythons @@ -192,6 +201,7 @@ class BatchType(object): Unlogged = 'UNLOGGED' Counter = 'COUNTER' + class BatchQuery(object): """ Handles the batching of queries @@ -624,12 +634,20 @@ def delete(self, columns=[]): else: execute(qs, self._where_values()) + def update(self, **values): + """ + updates the contents of the query + """ + qs = 
['UPDATE {}'.format(self.column_family_name)] + qs += ['SET'] + def __eq__(self, q): return set(self._where) == set(q._where) def __ne__(self, q): return not (self != q) + class ResultObject(dict): """ adds attribute access to a dictionary @@ -641,6 +659,7 @@ def __getattr__(self, item): except KeyError: raise AttributeError + class SimpleQuerySet(AbstractQuerySet): """ @@ -658,6 +677,7 @@ def _construct_instance(values): return ResultObject(zip(names, values)) return _construct_instance + class ModelQuerySet(AbstractQuerySet): """ @@ -763,10 +783,48 @@ def batch(self, batch_obj): self._batch = batch_obj return self - def save(self): + def _delete_null_columns(self): """ - Creates / updates a row. - This is a blind insert call. + executes a delete query to remove null columns + """ + values, field_names, field_ids, field_values, query_values = self._get_query_values() + + # delete nulled columns and removed map keys + qs = ['DELETE'] + query_values = {} + + del_statements = [] + for k,v in self.instance._values.items(): + col = v.column + if v.deleted: + del_statements += ['"{}"'.format(col.db_field_name)] + elif isinstance(col, Map): + del_statements += col.get_delete_statement(v.value, v.previous_value, query_values) + + if del_statements: + qs += [', '.join(del_statements)] + + qs += ['FROM {}'.format(self.column_family_name)] + + qs += ['WHERE'] + where_statements = [] + for name, col in self.model._primary_keys.items(): + field_id = uuid4().hex + query_values[field_id] = field_values[name] + where_statements += ['"{}" = :{}'.format(col.db_field_name, field_id)] + qs += [' AND '.join(where_statements)] + + qs = ' '.join(qs) + + if self._batch: + self._batch.add_query(qs, query_values) + else: + execute(qs, query_values) + + def update(self): + """ + updates a row. + This is a blind update call. All validation and cleaning needs to happen prior to calling this. 
""" @@ -774,6 +832,57 @@ def save(self): raise CQLEngineException("DML Query intance attribute is None") assert type(self.instance) == self.model + values, field_names, field_ids, field_values, query_values = self._get_query_values() + + qs = [] + qs += ["UPDATE {}".format(self.column_family_name)] + qs += ["SET"] + + set_statements = [] + #get defined fields and their column names + for name, col in self.model._columns.items(): + if not col.is_primary_key: + val = values.get(name) + if val is None: + continue + if isinstance(col, (BaseContainerColumn, Counter)): + #remove value from query values, the column will handle it + query_values.pop(field_ids.get(name), None) + + val_mgr = self.instance._values[name] + set_statements += col.get_update_statement(val, val_mgr.previous_value, query_values) + + else: + set_statements += ['"{}" = :{}'.format(col.db_field_name, field_ids[col.db_field_name])] + qs += [', '.join(set_statements)] + + qs += ['WHERE'] + + where_statements = [] + for name, col in self.model._primary_keys.items(): + where_statements += ['"{}" = :{}'.format(col.db_field_name, field_ids[col.db_field_name])] + + qs += [' AND '.join(where_statements)] + + # clear the qs if there are no set statements and this is not a counter model + if not set_statements and not self.instance._has_counter: + qs = [] + + qs = ' '.join(qs) + # skip query execution if it's empty + # caused by pointless update queries + if qs: + if self._batch: + self._batch.add_query(qs, query_values) + else: + execute(qs, query_values) + + self._delete_null_columns() + + def _get_query_values(self): + """ + returns all the data needed to do queries + """ #organize data value_pairs = [] values = self.instance._as_dict() @@ -789,42 +898,24 @@ def save(self): field_ids = {n:uuid4().hex for n in field_names} field_values = dict(value_pairs) query_values = {field_ids[n]:field_values[n] for n in field_names} + return values, field_names, field_ids, field_values, query_values - qs = [] - if 
self.instance._has_counter or self.instance._can_update(): - qs += ["UPDATE {}".format(self.column_family_name)] - qs += ["SET"] - - set_statements = [] - #get defined fields and their column names - for name, col in self.model._columns.items(): - if not col.is_primary_key: - val = values.get(name) - if val is None: - continue - if isinstance(col, (BaseContainerColumn, Counter)): - #remove value from query values, the column will handle it - query_values.pop(field_ids.get(name), None) - - val_mgr = self.instance._values[name] - set_statements += col.get_update_statement(val, val_mgr.previous_value, query_values) - - else: - set_statements += ['"{}" = :{}'.format(col.db_field_name, field_ids[col.db_field_name])] - qs += [', '.join(set_statements)] - - qs += ['WHERE'] - - where_statements = [] - for name, col in self.model._primary_keys.items(): - where_statements += ['"{}" = :{}'.format(col.db_field_name, field_ids[col.db_field_name])] - - qs += [' AND '.join(where_statements)] + def save(self): + """ + Creates / updates a row. + This is a blind insert call. + All validation and cleaning needs to happen + prior to calling this. 
+ """ + if self.instance is None: + raise CQLEngineException("DML Query intance attribute is None") + assert type(self.instance) == self.model - # clear the qs if there are no set statements and this is not a counter model - if not set_statements and not self.instance._has_counter: - qs = [] + values, field_names, field_ids, field_values, query_values = self._get_query_values() + qs = [] + if self.instance._has_counter or self.instance._can_update(): + return self.update() else: qs += ["INSERT INTO {}".format(self.column_family_name)] qs += ["({})".format(', '.join(['"{}"'.format(f) for f in field_names]))] @@ -841,38 +932,8 @@ def save(self): else: execute(qs, query_values) - - # delete nulled columns and removed map keys - qs = ['DELETE'] - query_values = {} - - del_statements = [] - for k,v in self.instance._values.items(): - col = v.column - if v.deleted: - del_statements += ['"{}"'.format(col.db_field_name)] - elif isinstance(col, Map): - del_statements += col.get_delete_statement(v.value, v.previous_value, query_values) - - if del_statements: - qs += [', '.join(del_statements)] - - qs += ['FROM {}'.format(self.column_family_name)] - - qs += ['WHERE'] - where_statements = [] - for name, col in self.model._primary_keys.items(): - field_id = uuid4().hex - query_values[field_id] = field_values[name] - where_statements += ['"{}" = :{}'.format(col.db_field_name, field_id)] - qs += [' AND '.join(where_statements)] - - qs = ' '.join(qs) - - if self._batch: - self._batch.add_query(qs, query_values) - else: - execute(qs, query_values) + # delete any nulled columns + self._delete_null_columns() def delete(self): """ Deletes one instance """ From dcd5a5a1acd80d1f21e9a6c0b8b89bb2e3138993 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 24 Oct 2013 13:17:50 -0700 Subject: [PATCH 0417/3726] ttls are working off the Class and instance --- cqlengine/models.py | 50 ++++++++++++++++++++++++ cqlengine/query.py | 6 ++- cqlengine/tests/query/test_queryset.py | 8 ++-- 
cqlengine/tests/test_ttl.py | 54 ++++++++++++++++++++++++-- 4 files changed, 110 insertions(+), 8 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 2969c228aa..006e0b855f 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -70,6 +70,51 @@ def __call__(self, *args, **kwargs): """ raise NotImplementedError +class TTLDescriptor(object): + """ + returns a query set descriptor + """ + def __get__(self, instance, model): + if instance: + def ttl_setter(ts): + instance._ttl = ts + return instance + return ttl_setter + + qs = model.__queryset__(model) + + def ttl_setter(ts): + qs._ttl = ts + return qs + + return ttl_setter + + def __call__(self, *args, **kwargs): + raise NotImplementedError + + +class ConsistencyDescriptor(object): + """ + returns a query set descriptor if called on Class, instance if it was an instance call + """ + def __get__(self, instance, model): + if instance: + def consistency_setter(consistency): + instance._consistency = consistency + return instance + return consistency_setter + + qs = model.__queryset__(model) + + def consistency_setter(ts): + qs._consistency = ts + return qs + + return consistency_setter + + def __call__(self, *args, **kwargs): + raise NotImplementedError + class ColumnQueryEvaluator(AbstractQueryableColumn): """ @@ -148,6 +193,8 @@ class DoesNotExist(_DoesNotExist): pass class MultipleObjectsReturned(_MultipleObjectsReturned): pass objects = QuerySetDescriptor() + ttl = TTLDescriptor() + consistency = ConsistencyDescriptor() #table names will be generated automatically from it's model and package name #however, you can also define them manually here @@ -179,10 +226,13 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass __queryset__ = ModelQuerySet __dmlquery__ = DMLQuery + __ttl__ = None + __read_repair_chance__ = 0.1 def __init__(self, **values): self._values = {} + self._ttl = None for name, column in self._columns.items(): value = values.get(name, None) diff --git 
a/cqlengine/query.py b/cqlengine/query.py index 02a5d7d577..804650fa13 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -273,6 +273,7 @@ def __init__(self, model): self._result_idx = None self._batch = None + self._ttl = None @property def column_family_name(self): @@ -727,6 +728,10 @@ def _get_ordering_condition(self, colname): return column.db_field_name, order_type + def _get_ttl_statement(self): + return "" + + def values_list(self, *fields, **kwargs): """ Instructs the query set to return tuples, not model instance """ flat = kwargs.pop('flat', False) @@ -755,7 +760,6 @@ def __init__(self, model, instance=None, batch=None): self.column_family_name = self.model.column_family_name() self.instance = instance self._batch = batch - pass def batch(self, batch_obj): if batch_obj is not None and not isinstance(batch_obj, BatchQuery): diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 117c87c18b..cda6831c2e 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -6,7 +6,7 @@ from cqlengine.exceptions import ModelException from cqlengine import functions -from cqlengine.management import create_table +from cqlengine.management import create_table, drop_table from cqlengine.management import delete_table from cqlengine.models import Model from cqlengine import columns @@ -200,9 +200,9 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): super(BaseQuerySetUsage, cls).tearDownClass() - delete_table(TestModel) - delete_table(IndexedTestModel) - delete_table(TestMultiClusteringModel) + drop_table(TestModel) + drop_table(IndexedTestModel) + drop_table(TestMultiClusteringModel) class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage): diff --git a/cqlengine/tests/test_ttl.py b/cqlengine/tests/test_ttl.py index 2274cf34c5..a0c0a6dfb9 100644 --- a/cqlengine/tests/test_ttl.py +++ b/cqlengine/tests/test_ttl.py @@ -1,7 +1,31 @@ +from cqlengine.management import 
sync_table, drop_table from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.models import Model +from uuid import uuid4 +from cqlengine import columns -class TTLQueryTests(BaseCassEngTestCase): +class TestTTLModel(Model): + id = columns.UUID(primary_key=True, default=lambda:uuid4()) + count = columns.Integer() + text = columns.Text(required=False) + + +class BaseTTLTest(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(BaseTTLTest, cls).setUpClass() + sync_table(TestTTLModel) + + @classmethod + def tearDownClass(cls): + super(BaseTTLTest, cls).tearDownClass() + drop_table(TestTTLModel) + + + +class TTLQueryTests(BaseTTLTest): def test_update_queryset_ttl_success_case(self): """ tests that ttls on querysets work as expected """ @@ -10,7 +34,31 @@ def test_select_ttl_failure(self): """ tests that ttls on select queries raise an exception """ -class TTLModelTests(BaseCassEngTestCase): +class TTLModelTests(BaseTTLTest): def test_model_ttl_success_case(self): - """ tests that ttls on models work as expected """ \ No newline at end of file + """ tests that ttls on models work as expected """ + + def test_queryset_is_returned_on_class(self): + """ + ensures we get a queryset descriptor back + """ + qs = TestTTLModel.ttl(60) + self.assertTrue(isinstance(qs, TestTTLModel.__queryset__), type(qs)) + +class TTLInstanceTest(BaseTTLTest): + def test_instance_is_returned(self): + """ + ensures that we properly handle the instance.ttl(60).save() scenario + :return: + """ + o = TestTTLModel.create(text="whatever") + o.text = "new stuff" + o.ttl(60) + self.assertEqual(60, o._ttl) + o.save() + + + +class QuerySetTTLFragmentTest(BaseTTLTest): + pass From 982031672e7af39320018d64805e4c30c4e6e1e8 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 24 Oct 2013 13:20:17 -0700 Subject: [PATCH 0418/3726] ttl statement fragment --- cqlengine/query.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cqlengine/query.py 
b/cqlengine/query.py index 804650fa13..8ff33a6ef1 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -729,8 +729,9 @@ def _get_ordering_condition(self, colname): return column.db_field_name, order_type def _get_ttl_statement(self): - return "" - + if not self._ttl: + return "" + return "USING TTL {}".format(self._ttl) def values_list(self, *fields, **kwargs): """ Instructs the query set to return tuples, not model instance """ From 308a5be608e4ecdd51a4b03b5ae67174bfb8b77c Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 13:25:07 -0700 Subject: [PATCH 0419/3726] updating update method to only issue set statements for modified rows and counters --- cqlengine/query.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cqlengine/query.py b/cqlengine/query.py index 00e9db0c83..dc7a18724d 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -843,8 +843,16 @@ def update(self): for name, col in self.model._columns.items(): if not col.is_primary_key: val = values.get(name) + + # don't update something that is null if val is None: continue + + # don't update something if it hasn't changed + if not self.instance._values[name].changed and not isinstance(col, Counter): + continue + + # add the update statements if isinstance(col, (BaseContainerColumn, Counter)): #remove value from query values, the column will handle it query_values.pop(field_ids.get(name), None) From f68a5924a6dceeb3ff854bb63bdd70f5cbfe0cfd Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 13:36:43 -0700 Subject: [PATCH 0420/3726] fixing container column bug that would write insert empty container columns --- cqlengine/columns.py | 22 +++++++++++++++++++--- cqlengine/query.py | 2 +- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 10a5e1b956..adccf8f90f 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -198,6 +198,10 @@ def cql(self): def get_cql(self): return 
'"{}"'.format(self.db_field_name) + def _val_is_null(self, val): + """ determines if the given value equates to a null value for the given column type """ + return val is None + class Bytes(Column): db_type = 'blob' @@ -526,6 +530,15 @@ def get_update_statement(self, val, prev, ctx): """ raise NotImplementedError + def _val_is_null(self, val): + return not val + + +class BaseContainerQuoter(ValueQuoter): + + def __nonzero__(self): + return bool(self.value) + class Set(BaseContainerColumn): """ @@ -535,7 +548,7 @@ class Set(BaseContainerColumn): """ db_type = 'set<{}>' - class Quoter(ValueQuoter): + class Quoter(BaseContainerQuoter): def __str__(self): cq = cql_quote @@ -625,12 +638,15 @@ class List(BaseContainerColumn): """ db_type = 'list<{}>' - class Quoter(ValueQuoter): + class Quoter(BaseContainerQuoter): def __str__(self): cq = cql_quote return '[' + ', '.join([cq(v) for v in self.value]) + ']' + def __nonzero__(self): + return bool(self.value) + def __init__(self, value_type, default=set, **kwargs): return super(List, self).__init__(value_type=value_type, default=default, **kwargs) @@ -736,7 +752,7 @@ class Map(BaseContainerColumn): db_type = 'map<{}, {}>' - class Quoter(ValueQuoter): + class Quoter(BaseContainerQuoter): def __str__(self): cq = cql_quote diff --git a/cqlengine/query.py b/cqlengine/query.py index dc7a18724d..377accb4fd 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -898,7 +898,7 @@ def _get_query_values(self): #get defined fields and their column names for name, col in self.model._columns.items(): val = values.get(name) - if val is None: continue + if col._val_is_null(val): continue value_pairs += [(col.db_field_name, val)] #construct query string From 38df187625d07dd45fca299b0e0f07c68901f9e2 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 13:42:20 -0700 Subject: [PATCH 0421/3726] adding tests around empty container insert bug fix --- .../tests/columns/test_container_columns.py | 28 +++++++++++++++++-- 1 file 
changed, 25 insertions(+), 3 deletions(-) diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 2e1d7cb844..0d3dc524b2 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -156,7 +156,14 @@ def test_to_python(self): def test_default_empty_container_saving(self): """ tests that the default empty container is not saved if it hasn't been updated """ - self.fail("implement") + pkey = uuid4() + # create a row with set data + TestSetModel.create(partition=pkey, int_set={3, 4}) + # create another with no set data + TestSetModel.create(partition=pkey) + + m = TestSetModel.get(partition=pkey) + self.assertEqual(m.int_set, {3, 4}) class TestListModel(Model): @@ -288,7 +295,14 @@ def test_to_python(self): def test_default_empty_container_saving(self): """ tests that the default empty container is not saved if it hasn't been updated """ - self.fail("implement") + pkey = uuid4() + # create a row with list data + TestListModel.create(partition=pkey, int_list=[1,2,3,4]) + # create another with no list data + TestListModel.create(partition=pkey) + + m = TestListModel.get(partition=pkey) + self.assertEqual(m.int_list, [1,2,3,4]) class TestMapModel(Model): @@ -414,7 +428,15 @@ def test_to_python(self): def test_default_empty_container_saving(self): """ tests that the default empty container is not saved if it hasn't been updated """ - self.fail("implement") + pkey = uuid4() + tmap = {1: uuid4(), 2: uuid4()} + # create a row with set data + TestMapModel.create(partition=pkey, int_map=tmap) + # create another with no set data + TestMapModel.create(partition=pkey) + + m = TestMapModel.get(partition=pkey) + self.assertEqual(m.int_map, tmap) # def test_partial_update_creation(self): # """ From 783f77222c27ec5b4268e5fdb7875b98e44c1754 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 24 Oct 2013 13:48:38 -0700 Subject: [PATCH 0422/3726] added 
consistency constants to top level --- cqlengine/__init__.py | 10 ++++++++++ cqlengine/models.py | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 639b56b0bd..51b3fc2f22 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -12,3 +12,13 @@ SizeTieredCompactionStrategy = "SizeTieredCompactionStrategy" LeveledCompactionStrategy = "LeveledCompactionStrategy" + +ANY = "ANY" +ONE = "ONE" +TWO = "TWO" +THREE = "THREE" +QUORUM = "QUORUM" +LOCAL_QUORUM = "LOCAL_QUORUM" +EACH_QUORUM = "EACH_QUORUM" +ALL = "ALL" + diff --git a/cqlengine/models.py b/cqlengine/models.py index 006e0b855f..5c365c9a56 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -7,7 +7,6 @@ from cqlengine.query import DoesNotExist as _DoesNotExist from cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned - class ModelDefinitionException(ModelException): pass @@ -76,6 +75,7 @@ class TTLDescriptor(object): """ def __get__(self, instance, model): if instance: + # instance method def ttl_setter(ts): instance._ttl = ts return instance From 8355e3bb5f276af6c6d0b9f6c5517e2b1dabfd70 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 24 Oct 2013 14:14:31 -0700 Subject: [PATCH 0423/3726] test that ensures TTL is called on update --- cqlengine/query.py | 5 ++++- cqlengine/tests/test_ttl.py | 14 ++++++++++++-- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 19b90cd3b8..1e32acda0a 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -284,6 +284,7 @@ def __init__(self, model): self._batch = None self._ttl = None + self._consistency = None @property def column_family_name(self): @@ -615,7 +616,7 @@ def defer(self, fields): return self._only_or_defer('defer', fields) def create(self, **kwargs): - return self.model(**kwargs).batch(self._batch).save() + return 
self.model(**kwargs).batch(self._batch).ttl(self._ttl).consistency(self._consistency).save() #----delete--- def delete(self, columns=[]): @@ -935,8 +936,10 @@ def save(self): qs += ['VALUES'] qs += ["({})".format(', '.join([':'+field_ids[f] for f in field_names]))] + qs = ' '.join(qs) + # skip query execution if it's empty # caused by pointless update queries if qs: diff --git a/cqlengine/tests/test_ttl.py b/cqlengine/tests/test_ttl.py index a0c0a6dfb9..92313b96d8 100644 --- a/cqlengine/tests/test_ttl.py +++ b/cqlengine/tests/test_ttl.py @@ -3,7 +3,8 @@ from cqlengine.models import Model from uuid import uuid4 from cqlengine import columns - +import mock +from cqlengine.connection import ConnectionPool class TestTTLModel(Model): id = columns.UUID(primary_key=True, default=lambda:uuid4()) @@ -56,7 +57,16 @@ def test_instance_is_returned(self): o.text = "new stuff" o.ttl(60) self.assertEqual(60, o._ttl) - o.save() + + def test_ttl_is_include_with_query(self): + o = TestTTLModel.create(text="whatever") + o.text = "new stuff" + o.ttl(60) + + with mock.patch.object(ConnectionPool, 'execute') as m: + o.save() + query = m.call_args[0][0] + self.assertIn("USING TTL", query) From 343367dbf7a57af85e6ea9eaecc4202bdb626bdb Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 14:19:41 -0700 Subject: [PATCH 0424/3726] adding update method to model --- cqlengine/models.py | 32 +++++++++++++++++++++++++++++++- cqlengine/query.py | 2 +- 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 2969c228aa..c1730b3cc0 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -356,7 +356,6 @@ def get(cls, *args, **kwargs): return cls.objects.get(*args, **kwargs) def save(self): - # handle polymorphic models if self._is_polymorphic: if self._is_polymorphic_base: @@ -375,6 +374,37 @@ def save(self): return self + def update(self, **values): + for k, v in values.items(): + col = self._columns.get(k) + + # check for 
nonexistant columns + if col is None: + raise ValidationError("{}.{} has no column named: {}".format(self.__module__, self.__name__, k)) + + # check for primary key update attempts + if col.is_primary_key: + raise ValidationError("Cannot apply update to primary key '{}' for {}.{}".format(k, self.__module__, self.__name__)) + + setattr(self, k, v) + + # handle polymorphic models + if self._is_polymorphic: + if self._is_polymorphic_base: + raise PolyMorphicModelException('cannot update polymorphic base model') + else: + setattr(self, self._polymorphic_column_name, self.__polymorphic_key__) + + self.validate() + self.__dmlquery__(self.__class__, self, batch=self._batch).update() + + #reset the value managers + for v in self._values.values(): + v.reset_previous_value() + self._is_persisted = True + + return self + def delete(self): """ Deletes this instance """ self.__dmlquery__(self.__class__, self, batch=self._batch).delete() diff --git a/cqlengine/query.py b/cqlengine/query.py index 377accb4fd..75d2b7a848 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -785,7 +785,7 @@ def batch(self, batch_obj): def _delete_null_columns(self): """ - executes a delete query to remove null columns + executes a delete query to remove columns that have changed to null """ values, field_names, field_ids, field_values, query_values = self._get_query_values() From eea1e7e1898ce1cdb5d6c19bb0d7d45cf7482958 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 24 Oct 2013 14:49:25 -0700 Subject: [PATCH 0425/3726] TTL working for save on an update --- cqlengine/models.py | 2 +- cqlengine/query.py | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 5c365c9a56..cfeb724efc 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -416,7 +416,7 @@ def save(self): is_new = self.pk is None self.validate() - self.__dmlquery__(self.__class__, self, batch=self._batch).save() + self.__dmlquery__(self.__class__, 
self, batch=self._batch, ttl=self._ttl).save() #reset the value managers for v in self._values.values(): diff --git a/cqlengine/query.py b/cqlengine/query.py index 1e32acda0a..a2ce6d20d8 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -776,12 +776,14 @@ class DMLQuery(object): unlike the read query object, this is mutable """ + _ttl = None - def __init__(self, model, instance=None, batch=None): + def __init__(self, model, instance=None, batch=None, ttl=None): self.model = model self.column_family_name = self.model.column_family_name() self.instance = instance self._batch = batch + self._ttl = ttl def batch(self, batch_obj): if batch_obj is not None and not isinstance(batch_obj, BatchQuery): @@ -878,6 +880,9 @@ def update(self): qs += [' AND '.join(where_statements)] + if self._ttl: + qs += ["USING TTL {}".format(self._ttl)] + # clear the qs if there are no set statements and this is not a counter model if not set_statements and not self.instance._has_counter: qs = [] @@ -936,7 +941,10 @@ def save(self): qs += ['VALUES'] qs += ["({})".format(', '.join([':'+field_ids[f] for f in field_names]))] + if self._ttl: + qs += ["USING TTL {}".format(self._ttl)] + qs += [] qs = ' '.join(qs) From cbc07a9fc88fb28200da8d4a69aed5cb2bf8a2e9 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 14:50:05 -0700 Subject: [PATCH 0426/3726] updating changelog --- changelog | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/changelog b/changelog index 21d31aa7c8..4e7d121ec1 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,9 @@ CHANGELOG +0.9 +* adding update method +* only saving collection fields on insert if they've been modified + 0.8.5 * adding support for timeouts From 4996f05563814312101ce2fe8905615f841f33f6 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 14:50:30 -0700 Subject: [PATCH 0427/3726] fixing update errors --- cqlengine/models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/models.py 
b/cqlengine/models.py index c1730b3cc0..93a9af984e 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -380,11 +380,11 @@ def update(self, **values): # check for nonexistant columns if col is None: - raise ValidationError("{}.{} has no column named: {}".format(self.__module__, self.__name__, k)) + raise ValidationError("{}.{} has no column named: {}".format(self.__module__, self.__class__.__name__, k)) # check for primary key update attempts if col.is_primary_key: - raise ValidationError("Cannot apply update to primary key '{}' for {}.{}".format(k, self.__module__, self.__name__)) + raise ValidationError("Cannot apply update to primary key '{}' for {}.{}".format(k, self.__module__, self.__class__.__name__)) setattr(self, k, v) From c906c77773c1f726f56d8315d593c14c945eec2b Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 14:50:42 -0700 Subject: [PATCH 0428/3726] adding tests around model updates --- cqlengine/tests/model/test_updates.py | 91 +++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) create mode 100644 cqlengine/tests/model/test_updates.py diff --git a/cqlengine/tests/model/test_updates.py b/cqlengine/tests/model/test_updates.py new file mode 100644 index 0000000000..c9ab45d9de --- /dev/null +++ b/cqlengine/tests/model/test_updates.py @@ -0,0 +1,91 @@ +from uuid import uuid4 + +from mock import patch +from cqlengine.exceptions import ValidationError + +from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.models import Model +from cqlengine import columns +from cqlengine.management import sync_table, drop_table +from cqlengine.connection import ConnectionPool + + +class TestUpdateModel(Model): + partition = columns.UUID(primary_key=True, default=uuid4) + cluster = columns.UUID(primary_key=True, default=uuid4) + count = columns.Integer(required=False) + text = columns.Text(required=False, index=True) + + +class ModelUpdateTests(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + 
super(ModelUpdateTests, cls).setUpClass() + sync_table(TestUpdateModel) + + @classmethod + def tearDownClass(cls): + super(ModelUpdateTests, cls).tearDownClass() + drop_table(TestUpdateModel) + + def test_update_model(self): + """ tests calling udpate on models with no values passed in """ + m0 = TestUpdateModel.create(count=5, text='monkey') + + # independently save over a new count value, unknown to original instance + m1 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster) + m1.count = 6 + m1.save() + + # update the text, and call update + m0.text = 'monkey land' + m0.update() + + # database should reflect both updates + m2 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster) + self.assertEqual(m2.count, m1.count) + self.assertEqual(m2.text, m0.text) + + def test_update_values(self): + """ tests calling update on models with values passed in """ + m0 = TestUpdateModel.create(count=5, text='monkey') + + # independently save over a new count value, unknown to original instance + m1 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster) + m1.count = 6 + m1.save() + + # update the text, and call update + m0.update(text='monkey land') + self.assertEqual(m0.text, 'monkey land') + + # database should reflect both updates + m2 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster) + self.assertEqual(m2.count, m1.count) + self.assertEqual(m2.text, m0.text) + + def test_noop_model_update(self): + """ tests that calling update on a model with no changes will do nothing. 
""" + m0 = TestUpdateModel.create(count=5, text='monkey') + + with patch.object(ConnectionPool, 'execute') as execute: + m0.update() + assert execute.call_count == 0 + + with patch.object(ConnectionPool, 'execute') as execute: + m0.update(count=5) + assert execute.call_count == 0 + + def test_invalid_update_kwarg(self): + """ tests that passing in a kwarg to the update method that isn't a column will fail """ + m0 = TestUpdateModel.create(count=5, text='monkey') + with self.assertRaises(ValidationError): + m0.update(numbers=20) + + def test_primary_key_update_failure(self): + """ tests that attempting to update the value of a primary key will fail """ + m0 = TestUpdateModel.create(count=5, text='monkey') + with self.assertRaises(ValidationError): + m0.update(partition=uuid4()) + From 2601354448f95348f35e0374717c430780cc7bbe Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 24 Oct 2013 14:52:38 -0700 Subject: [PATCH 0429/3726] TTL working on inserts --- cqlengine/tests/test_ttl.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/cqlengine/tests/test_ttl.py b/cqlengine/tests/test_ttl.py index 92313b96d8..6093b76545 100644 --- a/cqlengine/tests/test_ttl.py +++ b/cqlengine/tests/test_ttl.py @@ -37,8 +37,13 @@ def test_select_ttl_failure(self): class TTLModelTests(BaseTTLTest): - def test_model_ttl_success_case(self): + def test_ttl_included_on_create(self): """ tests that ttls on models work as expected """ + with mock.patch.object(ConnectionPool, 'execute') as m: + TestTTLModel.ttl(60).create(text="hello blake") + + query = m.call_args[0][0] + self.assertIn("USING TTL", query) def test_queryset_is_returned_on_class(self): """ @@ -47,6 +52,8 @@ def test_queryset_is_returned_on_class(self): qs = TestTTLModel.ttl(60) self.assertTrue(isinstance(qs, TestTTLModel.__queryset__), type(qs)) + + class TTLInstanceTest(BaseTTLTest): def test_instance_is_returned(self): """ @@ -58,7 +65,7 @@ def test_instance_is_returned(self): o.ttl(60) 
self.assertEqual(60, o._ttl) - def test_ttl_is_include_with_query(self): + def test_ttl_is_include_with_query_on_update(self): o = TestTTLModel.create(text="whatever") o.text = "new stuff" o.ttl(60) From ca67541c5a689e7aac4670e11e4727ea1f986fe2 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 24 Oct 2013 14:55:44 -0700 Subject: [PATCH 0430/3726] test for updates --- cqlengine/tests/test_ttl.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/cqlengine/tests/test_ttl.py b/cqlengine/tests/test_ttl.py index 6093b76545..11a868a32e 100644 --- a/cqlengine/tests/test_ttl.py +++ b/cqlengine/tests/test_ttl.py @@ -53,6 +53,17 @@ def test_queryset_is_returned_on_class(self): self.assertTrue(isinstance(qs, TestTTLModel.__queryset__), type(qs)) +class TTLInstanceUpdateTest(BaseTTLTest): + def test_update_includes_ttl(self): + model = TestTTLModel.create(text="goodbye blake") + with mock.patch.object(ConnectionPool, 'execute') as m: + model.ttl(60).update(text="goodbye forever") + + query = m.call_args[0][0] + self.assertIn("USING TTL", query) + + + class TTLInstanceTest(BaseTTLTest): def test_instance_is_returned(self): From e22dea631dc1541b7ebc3cde34f94f36c16ecf37 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 24 Oct 2013 14:57:55 -0700 Subject: [PATCH 0431/3726] update with TTL work --- cqlengine/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 88abd85a51..16d7b01eb2 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -446,7 +446,7 @@ def update(self, **values): setattr(self, self._polymorphic_column_name, self.__polymorphic_key__) self.validate() - self.__dmlquery__(self.__class__, self, batch=self._batch).update() + self.__dmlquery__(self.__class__, self, batch=self._batch, ttl=self._ttl).update() #reset the value managers for v in self._values.values(): From f1fb9da47c867f4edd57327de0f4a3a773fd0068 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 24 Oct 2013 
15:36:20 -0700 Subject: [PATCH 0432/3726] ensure consistency is called with the right param --- cqlengine/connection.py | 11 +++++---- cqlengine/tests/test_consistency.py | 35 +++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 4 deletions(-) create mode 100644 cqlengine/tests/test_consistency.py diff --git a/cqlengine/connection.py b/cqlengine/connection.py index f6e25e4e76..df89e430f2 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -165,10 +165,10 @@ def _create_transport(self, host): from thrift.transport import TSocket, TTransport thrift_socket = TSocket.TSocket(host.name, host.port) - + if self._timeout is not None: thrift_socket.setTimeout(self._timeout) - + return TTransport.TFramedTransport(thrift_socket) def _create_connection(self): @@ -202,14 +202,17 @@ def _create_connection(self): raise CQLConnectionError("Could not connect to any server in cluster") - def execute(self, query, params): + def execute(self, query, params, consistency_level=None): + if not consistency_level: + consistency_level = self._consistency + while True: try: con = self.get() if not con: raise CQLEngineException("Error calling execute without calling setup.") cur = con.cursor() - cur.execute(query, params) + cur.execute(query, params, consistency_level=consistency_level) columns = [i[0] for i in cur.description or []] results = [RowResult(r) for r in cur.fetchall()] LOG.debug('{} {}'.format(query, repr(params))) diff --git a/cqlengine/tests/test_consistency.py b/cqlengine/tests/test_consistency.py new file mode 100644 index 0000000000..cf1673ed9c --- /dev/null +++ b/cqlengine/tests/test_consistency.py @@ -0,0 +1,35 @@ +from cqlengine.management import sync_table, drop_table +from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.models import Model +from uuid import uuid4 +from cqlengine import columns +import mock +from cqlengine.connection import ConnectionPool +from cqlengine import ALL + +class TestConsistencyModel(Model): + id = 
columns.UUID(primary_key=True, default=lambda:uuid4()) + count = columns.Integer() + text = columns.Text(required=False) + +class BaseConsistencyTest(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(BaseConsistencyTest, cls).setUpClass() + sync_table(TestConsistencyModel) + + @classmethod + def tearDownClass(cls): + super(BaseConsistencyTest, cls).tearDownClass() + drop_table(TestConsistencyModel) + + +class TestConsistency(BaseConsistencyTest): + def test_create_uses_consistency(self): + + with mock.patch.object(ConnectionPool, 'execute') as m: + TestConsistencyModel.consistency(ALL).create(text="i am not fault tolerant this way") + + args = m.call_args + self.assertEqual(ALL, args[2]) From a7c7a85a3522f38ae6184a4c524bd2487b22dbfc Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 15:58:14 -0700 Subject: [PATCH 0433/3726] adding queryset updates and supporting tests --- cqlengine/query.py | 57 +++++++++++-- cqlengine/tests/query/test_updates.py | 118 ++++++++++++++++++++++++++ 2 files changed, 167 insertions(+), 8 deletions(-) create mode 100644 cqlengine/tests/query/test_updates.py diff --git a/cqlengine/query.py b/cqlengine/query.py index 75d2b7a848..b24c67a06b 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -9,7 +9,7 @@ from cqlengine.connection import connection_manager, execute, RowResult -from cqlengine.exceptions import CQLEngineException +from cqlengine.exceptions import CQLEngineException, ValidationError from cqlengine.functions import QueryValue, Token #CQL 3 reference: @@ -634,13 +634,6 @@ def delete(self, columns=[]): else: execute(qs, self._where_values()) - def update(self, **values): - """ - updates the contents of the query - """ - qs = ['UPDATE {}'.format(self.column_family_name)] - qs += ['SET'] - def __eq__(self, q): return set(self._where) == set(q._where) @@ -760,6 +753,54 @@ def values_list(self, *fields, **kwargs): clone._flat_values_list = flat return clone + def update(self, **values): + 
""" Updates the rows in this queryset """ + if not values: + raise ValidationError("At least one column needs to be updated") + + set_statements = [] + ctx = {} + nulled_columns = set() + for name, val in values.items(): + col = self.model._columns.get(name) + # check for nonexistant columns + if col is None: + raise ValidationError("{}.{} has no column named: {}".format(self.__module__, self.model.__name__, name)) + # check for primary key update attempts + if col.is_primary_key: + raise ValidationError("Cannot apply update to primary key '{}' for {}.{}".format(name, self.__module__, self.model.__name__)) + + val = col.validate(val) + if val is None: + nulled_columns.add(name) + continue + # add the update statements + if isinstance(col, (BaseContainerColumn, Counter)): + val_mgr = self.instance._values[name] + set_statements += col.get_update_statement(val, val_mgr.previous_value, ctx) + + else: + field_id = uuid4().hex + set_statements += ['"{}" = :{}'.format(col.db_field_name, field_id)] + ctx[field_id] = val + + if set_statements: + qs = "UPDATE {} SET {} WHERE {}".format( + self.column_family_name, + ', '.join(set_statements), + self._where_clause() + ) + ctx.update(self._where_values()) + execute(qs, ctx) + + if nulled_columns: + qs = "DELETE {} FROM {} WHERE {}".format( + ', '.join(nulled_columns), + self.column_family_name, + self._where_clause() + ) + execute(qs, self._where_values()) + class DMLQuery(object): """ diff --git a/cqlengine/tests/query/test_updates.py b/cqlengine/tests/query/test_updates.py new file mode 100644 index 0000000000..56d5cd5ef0 --- /dev/null +++ b/cqlengine/tests/query/test_updates.py @@ -0,0 +1,118 @@ +from uuid import uuid4 +from cqlengine.exceptions import ValidationError + +from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.models import Model +from cqlengine.management import sync_table, drop_table +from cqlengine import columns + + +class TestQueryUpdateModel(Model): + partition = 
columns.UUID(primary_key=True, default=uuid4) + cluster = columns.Integer(primary_key=True) + count = columns.Integer(required=False) + text = columns.Text(required=False, index=True) + + +class QueryUpdateTests(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(QueryUpdateTests, cls).setUpClass() + sync_table(TestQueryUpdateModel) + + @classmethod + def tearDownClass(cls): + super(QueryUpdateTests, cls).tearDownClass() + drop_table(TestQueryUpdateModel) + + def test_update_values(self): + """ tests calling udpate on a queryset """ + partition = uuid4() + for i in range(5): + TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i)) + + # sanity check + for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)): + assert row.cluster == i + assert row.count == i + assert row.text == str(i) + + # perform update + TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6) + + for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)): + assert row.cluster == i + assert row.count == (6 if i == 3 else i) + assert row.text == str(i) + + def test_update_values_validation(self): + """ tests calling udpate on models with values passed in """ + partition = uuid4() + for i in range(5): + TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i)) + + # sanity check + for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)): + assert row.cluster == i + assert row.count == i + assert row.text == str(i) + + # perform update + with self.assertRaises(ValidationError): + TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count='asdf') + + def test_update_with_no_values_failure(self): + """ tests calling update on models with no values passed in """ + with self.assertRaises(ValidationError): + TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update() + + def test_invalid_update_kwarg(self): + """ tests that passing in a kwarg 
to the update method that isn't a column will fail """ + with self.assertRaises(ValidationError): + TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(bacon=5000) + + def test_primary_key_update_failure(self): + """ tests that attempting to update the value of a primary key will fail """ + with self.assertRaises(ValidationError): + TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(cluster=5000) + + def test_null_update_deletes_column(self): + """ setting a field to null in the update should issue a delete statement """ + partition = uuid4() + for i in range(5): + TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i)) + + # sanity check + for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)): + assert row.cluster == i + assert row.count == i + assert row.text == str(i) + + # perform update + TestQueryUpdateModel.objects(partition=partition, cluster=3).update(text=None) + + for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)): + assert row.cluster == i + assert row.count == i + assert row.text == (None if i == 3 else str(i)) + + def test_mixed_value_and_null_update(self): + """ tests that updating a columns value, and removing another works properly """ + partition = uuid4() + for i in range(5): + TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i)) + + # sanity check + for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)): + assert row.cluster == i + assert row.count == i + assert row.text == str(i) + + # perform update + TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6, text=None) + + for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)): + assert row.cluster == i + assert row.count == (6 if i == 3 else i) + assert row.text == (None if i == 3 else str(i)) From 1aa65768f066bfa6e22f422d0be7aa3b54b2861d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 
17:14:00 -0700 Subject: [PATCH 0434/3726] removing empty update exception --- cqlengine/query.py | 2 +- cqlengine/tests/query/test_updates.py | 6 +----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index b24c67a06b..6619c5a2bb 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -756,7 +756,7 @@ def values_list(self, *fields, **kwargs): def update(self, **values): """ Updates the rows in this queryset """ if not values: - raise ValidationError("At least one column needs to be updated") + return set_statements = [] ctx = {} diff --git a/cqlengine/tests/query/test_updates.py b/cqlengine/tests/query/test_updates.py index 56d5cd5ef0..0c9c88cf21 100644 --- a/cqlengine/tests/query/test_updates.py +++ b/cqlengine/tests/query/test_updates.py @@ -1,5 +1,6 @@ from uuid import uuid4 from cqlengine.exceptions import ValidationError +from cqlengine.query import QueryException from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.models import Model @@ -62,11 +63,6 @@ def test_update_values_validation(self): with self.assertRaises(ValidationError): TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count='asdf') - def test_update_with_no_values_failure(self): - """ tests calling update on models with no values passed in """ - with self.assertRaises(ValidationError): - TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update() - def test_invalid_update_kwarg(self): """ tests that passing in a kwarg to the update method that isn't a column will fail """ with self.assertRaises(ValidationError): From 3ef6c10ac670e8c866c959ab8443065f820b687a Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 17:14:34 -0700 Subject: [PATCH 0435/3726] adding docs for queryset update --- docs/topics/models.rst | 2 +- docs/topics/queryset.rst | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 
e665e6c5ca..42d447f9d7 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -137,9 +137,9 @@ Model Methods .. method:: delete() Deletes the object from the database. - -- method:: update(**values) + Performs an update on the model instance. You can pass in values to set on the model for updating, or you can call without values to execute an update against any modified fields. If no fields on the model have been modified since loading, no query will be diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index cb1fdbb836..5632e41043 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -353,3 +353,16 @@ QuerySet method reference .. method:: allow_filtering() Enables the (usually) unwise practive of querying on a clustering key without also defining a partition key + + -- method:: update(**values) + + Performs an update on the row selected by the queryset. Include values to update in the + update like so: + + .. code-block:: python + Model.objects(key=n).update(value='x') + + Passing in updates for columns which are not part of the model will raise a ValidationError. + Per column validation will be performed, but instance level validation will not + (`Model.validate` is not called). 
+ From bfe2ebd31ba8ddee6eb95b13b73d29bc469ed57d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 17:23:25 -0700 Subject: [PATCH 0436/3726] adding BigInt to changelog --- changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog b/changelog index 4e7d121ec1..9ab05e8e79 100644 --- a/changelog +++ b/changelog @@ -2,6 +2,7 @@ CHANGELOG 0.9 * adding update method +* adding BigInt column (thanks @Lifto) * only saving collection fields on insert if they've been modified 0.8.5 From 73756b247682f299bf8c3aa2511e10cc36195d84 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 17:28:45 -0700 Subject: [PATCH 0437/3726] adding dokai's time uuid update to changelog --- changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog b/changelog index 9ab05e8e79..57f6fc2c6d 100644 --- a/changelog +++ b/changelog @@ -3,6 +3,7 @@ CHANGELOG 0.9 * adding update method * adding BigInt column (thanks @Lifto) +* adding support for timezone aware time uuid functions (thanks @dokai) * only saving collection fields on insert if they've been modified 0.8.5 From 8b238734e66137f465cb12dc0a9faff15d54a107 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 24 Oct 2013 17:33:52 -0700 Subject: [PATCH 0438/3726] making sure we send the consistency_level explicitly --- cqlengine/connection.py | 6 ++++-- cqlengine/models.py | 10 +++++++--- cqlengine/query.py | 12 +++++++----- cqlengine/tests/test_consistency.py | 8 +++++++- 4 files changed, 25 insertions(+), 11 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index df89e430f2..6fec0d3d64 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -229,9 +229,11 @@ def execute(self, query, params, consistency_level=None): raise ex -def execute(query, params=None): +def execute(query, params=None, consistency_level=None): params = params or {} - return connection_pool.execute(query, params) + if consistency_level is None: + consistency_level = 
connection_pool._consistency + return connection_pool.execute(query, params, consistency_level) @contextmanager def connection_manager(): diff --git a/cqlengine/models.py b/cqlengine/models.py index 16d7b01eb2..143320a31b 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -106,8 +106,8 @@ def consistency_setter(consistency): qs = model.__queryset__(model) - def consistency_setter(ts): - qs._consistency = ts + def consistency_setter(consistency): + qs._consistency = consistency return qs return consistency_setter @@ -227,6 +227,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass __dmlquery__ = DMLQuery __ttl__ = None + __consistency__ = None # can be set per query __read_repair_chance__ = 0.1 @@ -446,7 +447,10 @@ def update(self, **values): setattr(self, self._polymorphic_column_name, self.__polymorphic_key__) self.validate() - self.__dmlquery__(self.__class__, self, batch=self._batch, ttl=self._ttl).update() + self.__dmlquery__(self.__class__, self, + batch=self._batch, + ttl=self._ttl, + consistency=self.consistency).update() #reset the value managers for v in self._values.values(): diff --git a/cqlengine/query.py b/cqlengine/query.py index 361f42ef44..d67b45d4a9 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -363,7 +363,7 @@ def _execute_query(self): if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") if self._result_cache is None: - columns, self._result_cache = execute(self._select_query(), self._where_values()) + columns, self._result_cache = execute(self._select_query(), self._where_values(), self._consistency) self._construct_result = self._get_result_constructor(columns) def _fill_result_cache_to_idx(self, idx): @@ -777,13 +777,15 @@ class DMLQuery(object): unlike the read query object, this is mutable """ _ttl = None + _consistency = None - def __init__(self, model, instance=None, batch=None, ttl=None): + def __init__(self, model, instance=None, batch=None, 
ttl=None, consistency=None): self.model = model self.column_family_name = self.model.column_family_name() self.instance = instance self._batch = batch self._ttl = ttl + self._consistency = consistency def batch(self, batch_obj): if batch_obj is not None and not isinstance(batch_obj, BatchQuery): @@ -894,7 +896,7 @@ def update(self): if self._batch: self._batch.add_query(qs, query_values) else: - execute(qs, query_values) + execute(qs, query_values, consistency_level=self._consistency) self._delete_null_columns() @@ -954,7 +956,7 @@ def save(self): if self._batch: self._batch.add_query(qs, query_values) else: - execute(qs, query_values) + execute(qs, query_values, self._consistency) # delete any nulled columns self._delete_null_columns() @@ -978,6 +980,6 @@ def delete(self): if self._batch: self._batch.add_query(qs, field_values) else: - execute(qs, field_values) + execute(qs, field_values, self._consistency) diff --git a/cqlengine/tests/test_consistency.py b/cqlengine/tests/test_consistency.py index cf1673ed9c..7ab49d7867 100644 --- a/cqlengine/tests/test_consistency.py +++ b/cqlengine/tests/test_consistency.py @@ -32,4 +32,10 @@ def test_create_uses_consistency(self): TestConsistencyModel.consistency(ALL).create(text="i am not fault tolerant this way") args = m.call_args - self.assertEqual(ALL, args[2]) + self.assertEqual(ALL, args[0][2]) + + def test_queryset_is_returned_on_create(self): + qs = TestConsistencyModel.consistency(ALL) + self.assertTrue(isinstance(qs, TestConsistencyModel.__queryset__), type(qs)) + + From da58f8b2fbb83b535751b6503ba5dd37a8b0da2a Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 24 Oct 2013 17:54:57 -0700 Subject: [PATCH 0439/3726] updates can use consistency levels --- cqlengine/models.py | 9 ++++++--- cqlengine/tests/test_consistency.py | 13 ++++++++++++- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 143320a31b..ced3a3fa08 100644 --- a/cqlengine/models.py +++ 
b/cqlengine/models.py @@ -100,7 +100,7 @@ class ConsistencyDescriptor(object): def __get__(self, instance, model): if instance: def consistency_setter(consistency): - instance._consistency = consistency + instance.__consistency__ = consistency return instance return consistency_setter @@ -416,7 +416,10 @@ def save(self): is_new = self.pk is None self.validate() - self.__dmlquery__(self.__class__, self, batch=self._batch, ttl=self._ttl).save() + self.__dmlquery__(self.__class__, self, + batch=self._batch, + ttl=self._ttl, + consistency=self.__consistency__).save() #reset the value managers for v in self._values.values(): @@ -450,7 +453,7 @@ def update(self, **values): self.__dmlquery__(self.__class__, self, batch=self._batch, ttl=self._ttl, - consistency=self.consistency).update() + consistency=self.__consistency__).update() #reset the value managers for v in self._values.values(): diff --git a/cqlengine/tests/test_consistency.py b/cqlengine/tests/test_consistency.py index 7ab49d7867..ec60450420 100644 --- a/cqlengine/tests/test_consistency.py +++ b/cqlengine/tests/test_consistency.py @@ -28,8 +28,9 @@ def tearDownClass(cls): class TestConsistency(BaseConsistencyTest): def test_create_uses_consistency(self): + qs = TestConsistencyModel.consistency(ALL) with mock.patch.object(ConnectionPool, 'execute') as m: - TestConsistencyModel.consistency(ALL).create(text="i am not fault tolerant this way") + qs.create(text="i am not fault tolerant this way") args = m.call_args self.assertEqual(ALL, args[0][2]) @@ -38,4 +39,14 @@ def test_queryset_is_returned_on_create(self): qs = TestConsistencyModel.consistency(ALL) self.assertTrue(isinstance(qs, TestConsistencyModel.__queryset__), type(qs)) + def test_update_uses_consistency(self): + t = TestConsistencyModel.create(text="bacon and eggs") + t.text = "ham sandwich" + + with mock.patch.object(ConnectionPool, 'execute') as m: + t.consistency(ALL).save() + + args = m.call_args + self.assertEqual(ALL, args[0][2]) + From 
3f6d293af21a37bf4c3d116d70e9f65dfdb62c55 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 24 Oct 2013 17:56:46 -0700 Subject: [PATCH 0440/3726] update test works --- cqlengine/tests/test_consistency.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/tests/test_consistency.py b/cqlengine/tests/test_consistency.py index ec60450420..27c455ef7d 100644 --- a/cqlengine/tests/test_consistency.py +++ b/cqlengine/tests/test_consistency.py @@ -45,7 +45,7 @@ def test_update_uses_consistency(self): with mock.patch.object(ConnectionPool, 'execute') as m: t.consistency(ALL).save() - + args = m.call_args self.assertEqual(ALL, args[0][2]) From 6b1391749edaccbbbd83767a16b8cacb8903a92c Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 24 Oct 2013 18:19:04 -0700 Subject: [PATCH 0441/3726] consistency with batch verified --- cqlengine/query.py | 9 +++++++-- cqlengine/tests/test_consistency.py | 17 ++++++++++++++++- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index d67b45d4a9..0e9395de6a 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -208,17 +208,22 @@ class BatchQuery(object): http://www.datastax.com/docs/1.2/cql_cli/cql/BATCH """ + _consistency = None - def __init__(self, batch_type=None, timestamp=None): + def __init__(self, batch_type=None, timestamp=None, consistency=None): self.queries = [] self.batch_type = batch_type if timestamp is not None and not isinstance(timestamp, datetime): raise CQLEngineException('timestamp object must be an instance of datetime') self.timestamp = timestamp + self._consistency = consistency def add_query(self, query, params): self.queries.append((query, params)) + def consistency(self, consistency): + self._consistency = consistency + def execute(self): if len(self.queries) == 0: # Empty batch is a no-op @@ -238,7 +243,7 @@ def execute(self): query_list.append('APPLY BATCH;') - execute('\n'.join(query_list), parameters) + 
execute('\n'.join(query_list), parameters, self._consistency) self.queries = [] diff --git a/cqlengine/tests/test_consistency.py b/cqlengine/tests/test_consistency.py index 27c455ef7d..f4c5986b80 100644 --- a/cqlengine/tests/test_consistency.py +++ b/cqlengine/tests/test_consistency.py @@ -5,7 +5,7 @@ from cqlengine import columns import mock from cqlengine.connection import ConnectionPool -from cqlengine import ALL +from cqlengine import ALL, BatchQuery class TestConsistencyModel(Model): id = columns.UUID(primary_key=True, default=lambda:uuid4()) @@ -50,3 +50,18 @@ def test_update_uses_consistency(self): self.assertEqual(ALL, args[0][2]) + def test_batch_consistency(self): + + with mock.patch.object(ConnectionPool, 'execute') as m: + with BatchQuery(consistency=ALL) as b: + TestConsistencyModel.batch(b).create(text="monkey") + + args = m.call_args + self.assertEqual(ALL, args[0][2]) + + with mock.patch.object(ConnectionPool, 'execute') as m: + with BatchQuery() as b: + TestConsistencyModel.batch(b).create(text="monkey") + + args = m.call_args + self.assertNotEqual(ALL, args[0][2]) From 691ddb31718d03b13c38f85797cec062b1294c5c Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 18:20:30 -0700 Subject: [PATCH 0442/3726] beginning queryset refactor with some basic statement and operator classes and a bunch of empty test files --- cqlengine/operators.py | 71 +++++++++++++++++++ cqlengine/statements.py | 51 +++++++++++++ cqlengine/tests/operators/__init__.py | 1 + .../operators/test_assignment_operators.py | 0 .../tests/operators/test_base_operator.py | 9 +++ .../tests/operators/test_where_operators.py | 30 ++++++++ cqlengine/tests/statements/__init__.py | 1 + .../tests/statements/test_base_statement.py | 0 .../tests/statements/test_delete_statement.py | 0 .../tests/statements/test_dml_statement.py | 0 .../tests/statements/test_insert_statement.py | 0 .../tests/statements/test_select_statement.py | 0 .../tests/statements/test_update_statement.py | 0 
.../tests/statements/test_where_clause.py | 13 ++++ 14 files changed, 176 insertions(+) create mode 100644 cqlengine/operators.py create mode 100644 cqlengine/statements.py create mode 100644 cqlengine/tests/operators/__init__.py create mode 100644 cqlengine/tests/operators/test_assignment_operators.py create mode 100644 cqlengine/tests/operators/test_base_operator.py create mode 100644 cqlengine/tests/operators/test_where_operators.py create mode 100644 cqlengine/tests/statements/__init__.py create mode 100644 cqlengine/tests/statements/test_base_statement.py create mode 100644 cqlengine/tests/statements/test_delete_statement.py create mode 100644 cqlengine/tests/statements/test_dml_statement.py create mode 100644 cqlengine/tests/statements/test_insert_statement.py create mode 100644 cqlengine/tests/statements/test_select_statement.py create mode 100644 cqlengine/tests/statements/test_update_statement.py create mode 100644 cqlengine/tests/statements/test_where_clause.py diff --git a/cqlengine/operators.py b/cqlengine/operators.py new file mode 100644 index 0000000000..dae52ee4e5 --- /dev/null +++ b/cqlengine/operators.py @@ -0,0 +1,71 @@ +class QueryOperatorException(Exception): pass + + +class BaseQueryOperator(object): + # The symbol that identifies this operator in kwargs + # ie: colname__ + symbol = None + + # The comparator symbol this operator uses in cql + cql_symbol = None + + def __unicode__(self): + if self.cql_symbol is None: + raise QueryOperatorException("cql symbol is None") + return self.cql_symbol + + @classmethod + def get_operator(cls, symbol): + if cls == BaseQueryOperator: + raise QueryOperatorException("get_operator can only be called from a BaseQueryOperator subclass") + if not hasattr(cls, 'opmap'): + cls.opmap = {} + def _recurse(klass): + if klass.symbol: + cls.opmap[klass.symbol.upper()] = klass + for subklass in klass.__subclasses__(): + _recurse(subklass) + pass + _recurse(cls) + try: + return cls.opmap[symbol.upper()] + except 
KeyError: + raise QueryOperatorException("{} doesn't map to a QueryOperator".format(symbol)) + + +class BaseWhereOperator(BaseQueryOperator): + """ base operator used for where clauses """ + + +class EqualsOperator(BaseWhereOperator): + symbol = 'EQ' + cql_symbol = '=' + + +class InOperator(EqualsOperator): + symbol = 'IN' + cql_symbol = 'IN' + + +class GreaterThanOperator(BaseWhereOperator): + symbol = "GT" + cql_symbol = '>' + + +class GreaterThanOrEqualOperator(BaseWhereOperator): + symbol = "GTE" + cql_symbol = '>=' + + +class LessThanOperator(BaseWhereOperator): + symbol = "LT" + cql_symbol = '<' + + +class LessThanOrEqualOperator(BaseWhereOperator): + symbol = "LTE" + cql_symbol = '<=' + + +class BaseAssignmentOperator(BaseQueryOperator): + """ base operator used for insert and delete statements """ \ No newline at end of file diff --git a/cqlengine/statements.py b/cqlengine/statements.py new file mode 100644 index 0000000000..e1f5c2deee --- /dev/null +++ b/cqlengine/statements.py @@ -0,0 +1,51 @@ +from cqlengine.operators import BaseWhereOperator + + +class StatementException(Exception): pass + + +class WhereClause(object): + """ a single where statement used in queries """ + + def __init__(self, field, operator, value): + super(WhereClause, self).__init__() + if not isinstance(operator, BaseWhereOperator): + raise StatementException( + "operator must be of type {}, got {}".format(BaseWhereOperator, type(operator)) + ) + + self.field = field + self.operator = operator + self.value = value + + +class BaseCQLStatement(object): + """ The base cql statement class """ + + def __init__(self, table, consistency=None): + super(BaseCQLStatement, self).__init__() + self.table = table + self.consistency = None + self.where_clauses = [] + + + + +class SelectStatement(BaseCQLStatement): + """ a cql select statement """ + + +class DMLStatement(BaseCQLStatement): + """ mutation statements """ + + +class InsertStatement(BaseCQLStatement): + """ an cql insert select 
statement """ + + +class UpdateStatement(BaseCQLStatement): + """ an cql update select statement """ + + +class DeleteStatement(BaseCQLStatement): + """ a cql delete statement """ diff --git a/cqlengine/tests/operators/__init__.py b/cqlengine/tests/operators/__init__.py new file mode 100644 index 0000000000..f6150a6d76 --- /dev/null +++ b/cqlengine/tests/operators/__init__.py @@ -0,0 +1 @@ +__author__ = 'bdeggleston' diff --git a/cqlengine/tests/operators/test_assignment_operators.py b/cqlengine/tests/operators/test_assignment_operators.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cqlengine/tests/operators/test_base_operator.py b/cqlengine/tests/operators/test_base_operator.py new file mode 100644 index 0000000000..af13fdb115 --- /dev/null +++ b/cqlengine/tests/operators/test_base_operator.py @@ -0,0 +1,9 @@ +from unittest import TestCase +from cqlengine.operators import BaseQueryOperator, QueryOperatorException + + +class BaseOperatorTest(TestCase): + + def test_get_operator_cannot_be_called_from_base_class(self): + with self.assertRaises(QueryOperatorException): + BaseQueryOperator.get_operator('*') \ No newline at end of file diff --git a/cqlengine/tests/operators/test_where_operators.py b/cqlengine/tests/operators/test_where_operators.py new file mode 100644 index 0000000000..f8f0e8fad8 --- /dev/null +++ b/cqlengine/tests/operators/test_where_operators.py @@ -0,0 +1,30 @@ +from unittest import TestCase +from cqlengine.operators import * + + +class TestWhereOperators(TestCase): + + def test_symbol_lookup(self): + """ tests where symbols are looked up properly """ + + def check_lookup(symbol, expected): + op = BaseWhereOperator.get_operator(symbol) + self.assertEqual(op, expected) + + check_lookup('EQ', EqualsOperator) + check_lookup('IN', InOperator) + check_lookup('GT', GreaterThanOperator) + check_lookup('GTE', GreaterThanOrEqualOperator) + check_lookup('LT', LessThanOperator) + check_lookup('LTE', LessThanOrEqualOperator) + + def 
test_operator_rendering(self): + """ tests symbols are rendered properly """ + self.assertEqual("=", unicode(EqualsOperator())) + self.assertEqual("IN", unicode(InOperator())) + self.assertEqual(">", unicode(GreaterThanOperator())) + self.assertEqual(">=", unicode(GreaterThanOrEqualOperator())) + self.assertEqual("<", unicode(LessThanOperator())) + self.assertEqual("<=", unicode(LessThanOrEqualOperator())) + + diff --git a/cqlengine/tests/statements/__init__.py b/cqlengine/tests/statements/__init__.py new file mode 100644 index 0000000000..f6150a6d76 --- /dev/null +++ b/cqlengine/tests/statements/__init__.py @@ -0,0 +1 @@ +__author__ = 'bdeggleston' diff --git a/cqlengine/tests/statements/test_base_statement.py b/cqlengine/tests/statements/test_base_statement.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cqlengine/tests/statements/test_delete_statement.py b/cqlengine/tests/statements/test_delete_statement.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cqlengine/tests/statements/test_dml_statement.py b/cqlengine/tests/statements/test_dml_statement.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cqlengine/tests/statements/test_insert_statement.py b/cqlengine/tests/statements/test_insert_statement.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cqlengine/tests/statements/test_select_statement.py b/cqlengine/tests/statements/test_select_statement.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cqlengine/tests/statements/test_update_statement.py b/cqlengine/tests/statements/test_update_statement.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cqlengine/tests/statements/test_where_clause.py b/cqlengine/tests/statements/test_where_clause.py new file mode 100644 index 0000000000..65300a97aa --- /dev/null +++ b/cqlengine/tests/statements/test_where_clause.py @@ -0,0 +1,13 @@ +from unittest import TestCase +from cqlengine.statements import StatementException, 
WhereClause + + +class TestWhereClause(TestCase): + + def test_operator_check(self): + """ tests that creating a where statement with a non BaseWhereOperator object fails """ + with self.assertRaises(StatementException): + WhereClause('a', 'b', 'c') + + def test_where_clause_rendering(self): + """ tests that where clauses are rendered properly """ \ No newline at end of file From 308fd2fbd92c14386bc9026da83335d5c8a49d12 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 18:25:22 -0700 Subject: [PATCH 0443/3726] adding where clause rendering --- cqlengine/operators.py | 3 +++ cqlengine/statements.py | 6 ++++++ cqlengine/tests/statements/test_where_clause.py | 6 +++++- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/cqlengine/operators.py b/cqlengine/operators.py index dae52ee4e5..c5ad4a5669 100644 --- a/cqlengine/operators.py +++ b/cqlengine/operators.py @@ -14,6 +14,9 @@ def __unicode__(self): raise QueryOperatorException("cql symbol is None") return self.cql_symbol + def __str__(self): + return str(unicode(self)) + @classmethod def get_operator(cls, symbol): if cls == BaseQueryOperator: diff --git a/cqlengine/statements.py b/cqlengine/statements.py index e1f5c2deee..e3ab61bb73 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -18,6 +18,12 @@ def __init__(self, field, operator, value): self.operator = operator self.value = value + def __unicode__(self): + return u"{} {} {}".format(self.field, self.operator, self.value) + + def __str__(self): + return str(unicode(self)) + class BaseCQLStatement(object): """ The base cql statement class """ diff --git a/cqlengine/tests/statements/test_where_clause.py b/cqlengine/tests/statements/test_where_clause.py index 65300a97aa..a455203642 100644 --- a/cqlengine/tests/statements/test_where_clause.py +++ b/cqlengine/tests/statements/test_where_clause.py @@ -1,4 +1,5 @@ from unittest import TestCase +from cqlengine.operators import EqualsOperator from cqlengine.statements import 
StatementException, WhereClause @@ -10,4 +11,7 @@ def test_operator_check(self): WhereClause('a', 'b', 'c') def test_where_clause_rendering(self): - """ tests that where clauses are rendered properly """ \ No newline at end of file + """ tests that where clauses are rendered properly """ + wc = WhereClause('a', EqualsOperator(), 'c') + self.assertEqual("a = c", unicode(wc)) + self.assertEqual("a = c", str(wc)) From 3bbf307ca9f7ddb928c67cb547dbbe0aa5d38148 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 18:28:56 -0700 Subject: [PATCH 0444/3726] adding field listification --- cqlengine/statements.py | 8 ++++++-- cqlengine/tests/statements/test_select_statement.py | 10 ++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index e3ab61bb73..f448f7ae87 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -35,11 +35,15 @@ def __init__(self, table, consistency=None): self.where_clauses = [] - - class SelectStatement(BaseCQLStatement): """ a cql select statement """ + def __init__(self, table, fields, consistency=None): + super(SelectStatement, self).__init__(table, consistency) + if isinstance(fields, basestring): + fields = [fields] + self.fields = fields + class DMLStatement(BaseCQLStatement): """ mutation statements """ diff --git a/cqlengine/tests/statements/test_select_statement.py b/cqlengine/tests/statements/test_select_statement.py index e69de29bb2..81f800d328 100644 --- a/cqlengine/tests/statements/test_select_statement.py +++ b/cqlengine/tests/statements/test_select_statement.py @@ -0,0 +1,10 @@ +from unittest import TestCase +from cqlengine.statements import SelectStatement + + +class SelectStatementTests(TestCase): + + def test_single_field_is_listified(self): + """ tests that passing a string field into the constructor puts it into a list """ + ss = SelectStatement('table', 'field') + self.assertEqual(ss.fields, ['field']) \ No newline at end of file 
From c3cf18bbdb5d28b4db1fc2988293a2a20685c3ed Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 24 Oct 2013 18:32:29 -0700 Subject: [PATCH 0445/3726] test for blind updates --- cqlengine/tests/test_consistency.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/cqlengine/tests/test_consistency.py b/cqlengine/tests/test_consistency.py index f4c5986b80..8eadd9c8d6 100644 --- a/cqlengine/tests/test_consistency.py +++ b/cqlengine/tests/test_consistency.py @@ -65,3 +65,14 @@ def test_batch_consistency(self): args = m.call_args self.assertNotEqual(ALL, args[0][2]) + + def test_blind_update(self): + t = TestConsistencyModel.create(text="bacon and eggs") + t.text = "ham sandwich" + uid = t.id + + with mock.patch.object(ConnectionPool, 'execute') as m: + TestConsistencyModel.objects(id=uid).update(text="grilled cheese") + + args = m.call_args + self.assertEqual(ALL, args[0][2]) From 0d708730bbd9750e5608c053d5337bcce1255b67 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 18:34:16 -0700 Subject: [PATCH 0446/3726] adding select field rendering --- cqlengine/statements.py | 7 ++++++- cqlengine/tests/statements/test_select_statement.py | 12 +++++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index f448f7ae87..b68afcd42b 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -31,7 +31,7 @@ class BaseCQLStatement(object): def __init__(self, table, consistency=None): super(BaseCQLStatement, self).__init__() self.table = table - self.consistency = None + self.consistency = consistency self.where_clauses = [] @@ -44,6 +44,11 @@ def __init__(self, table, fields, consistency=None): fields = [fields] self.fields = fields + def __unicode__(self): + qs = ['SELECT'] + qs += [', '.join(self.fields) if self.fields else '*'] + return ' '.join(qs) + class DMLStatement(BaseCQLStatement): """ mutation statements """ diff --git 
a/cqlengine/tests/statements/test_select_statement.py b/cqlengine/tests/statements/test_select_statement.py index 81f800d328..9cf1e9db1e 100644 --- a/cqlengine/tests/statements/test_select_statement.py +++ b/cqlengine/tests/statements/test_select_statement.py @@ -7,4 +7,14 @@ class SelectStatementTests(TestCase): def test_single_field_is_listified(self): """ tests that passing a string field into the constructor puts it into a list """ ss = SelectStatement('table', 'field') - self.assertEqual(ss.fields, ['field']) \ No newline at end of file + self.assertEqual(ss.fields, ['field']) + + def test_field_rendering(self): + """ tests that fields are properly added to the select statement """ + ss = SelectStatement('table', ['f1', 'f2']) + self.assertTrue(unicode(ss).startswith('SELECT f1, f2')) + + def test_none_fields_rendering(self): + """ tests that a '*' is added if no fields are passed in """ + ss = SelectStatement('table', None) + self.assertTrue(unicode(ss).startswith('SELECT *')) From 39e5c08880eeb156ba2c8115b0895208a1768c3b Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 19:05:09 -0700 Subject: [PATCH 0447/3726] adding more select rendering and beginning dml statements --- cqlengine/statements.py | 75 ++++++++++++++++--- .../tests/statements/test_insert_statement.py | 11 +++ .../tests/statements/test_select_statement.py | 26 ++++++- 3 files changed, 99 insertions(+), 13 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index b68afcd42b..96581599e3 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -19,7 +19,7 @@ def __init__(self, field, operator, value): self.value = value def __unicode__(self): - return u"{} {} {}".format(self.field, self.operator, self.value) + return u'"{}" {} {}'.format(self.field, self.operator, self.value) def __str__(self): return str(unicode(self)) @@ -28,39 +28,92 @@ def __str__(self): class BaseCQLStatement(object): """ The base cql statement class """ - def 
__init__(self, table, consistency=None): + def __init__(self, table, consistency=None, where=None): super(BaseCQLStatement, self).__init__() self.table = table self.consistency = consistency - self.where_clauses = [] + self.where_clauses = where or [] + + def add_where_clause(self, clause): + if not isinstance(clause, WhereClause): + raise StatementException("only instances of WhereClause can be added to statements") + self.where_clauses.append(clause) class SelectStatement(BaseCQLStatement): """ a cql select statement """ - def __init__(self, table, fields, consistency=None): - super(SelectStatement, self).__init__(table, consistency) - if isinstance(fields, basestring): - fields = [fields] - self.fields = fields + def __init__(self, + table, + fields, + consistency=None, + where=None, + order_by=None, + limit=None, + allow_filtering=False + ): + super(SelectStatement, self).__init__( + table, + consistency=consistency, + where=where + ) + + self.fields = [fields] if isinstance(fields, basestring) else fields + self.order_by = [order_by] if isinstance(order_by, basestring) else order_by + self.limit = limit + self.allow_filtering = allow_filtering def __unicode__(self): qs = ['SELECT'] - qs += [', '.join(self.fields) if self.fields else '*'] + qs += [', '.join(['"{}"'.format(f) for f in self.fields]) if self.fields else '*'] + qs += ['FROM', self.table] + + if self.where_clauses: + qs += ['WHERE', ' AND '.join([unicode(c) for c in self.where_clauses])] + + if self.order_by: + qs += ['ORDER BY {}'.format(', '.join(unicode(o) for o in self.order_by))] + + if self.limit: + qs += ['LIMIT {}'.format(self.limit)] + + if self.allow_filtering: + qs += ['ALLOW FILTERING'] + return ' '.join(qs) class DMLStatement(BaseCQLStatement): """ mutation statements """ + def __init__(self, table, consistency=None, where=None, ttl=None): + super(DMLStatement, self).__init__( + table, + consistency=consistency, + where=where) + self.ttl = ttl + -class InsertStatement(BaseCQLStatement): 
+class InsertStatement(DMLStatement): """ an cql insert select statement """ + def __init__(self, table, values, consistency=None): + super(InsertStatement, self).__init__( + table, + consistency=consistency, + where=None + ) + + def add_where_clause(self, clause): + raise StatementException("Cannot add where clauses to insert statements") -class UpdateStatement(BaseCQLStatement): + +class UpdateStatement(DMLStatement): """ an cql update select statement """ + def __init__(self, table, consistency=None, where=None): + super(UpdateStatement, self).__init__(table, consistency, where) + class DeleteStatement(BaseCQLStatement): """ a cql delete statement """ diff --git a/cqlengine/tests/statements/test_insert_statement.py b/cqlengine/tests/statements/test_insert_statement.py index e69de29bb2..6b88ef343c 100644 --- a/cqlengine/tests/statements/test_insert_statement.py +++ b/cqlengine/tests/statements/test_insert_statement.py @@ -0,0 +1,11 @@ +from unittest import TestCase +from cqlengine.statements import InsertStatement, StatementException + + +class InsertStatementTests(TestCase): + + def test_where_clause_failure(self): + """ tests that where clauses cannot be added to Insert statements """ + ist = InsertStatement('table') + with self.assertRaises(StatementException): + ist.add_where_clause('s') \ No newline at end of file diff --git a/cqlengine/tests/statements/test_select_statement.py b/cqlengine/tests/statements/test_select_statement.py index 9cf1e9db1e..bdaa3d8471 100644 --- a/cqlengine/tests/statements/test_select_statement.py +++ b/cqlengine/tests/statements/test_select_statement.py @@ -1,5 +1,6 @@ from unittest import TestCase -from cqlengine.statements import SelectStatement +from cqlengine.statements import SelectStatement, WhereClause +from cqlengine.operators import * class SelectStatementTests(TestCase): @@ -12,9 +13,30 @@ def test_single_field_is_listified(self): def test_field_rendering(self): """ tests that fields are properly added to the select 
statement """ ss = SelectStatement('table', ['f1', 'f2']) - self.assertTrue(unicode(ss).startswith('SELECT f1, f2')) + self.assertTrue(unicode(ss).startswith('SELECT "f1", "f2"')) + self.assertTrue(str(ss).startswith('SELECT "f1", "f2"')) def test_none_fields_rendering(self): """ tests that a '*' is added if no fields are passed in """ ss = SelectStatement('table', None) self.assertTrue(unicode(ss).startswith('SELECT *')) + self.assertTrue(str(ss).startswith('SELECT *')) + + def test_table_rendering(self): + ss = SelectStatement('table', None) + self.assertTrue(unicode(ss).startswith('SELECT * FROM table')) + self.assertTrue(str(ss).startswith('SELECT * FROM table')) + + def test_where_clause_rendering(self): + ss = SelectStatement('table', None) + ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) + self.assertEqual(unicode(ss), 'SELECT * FROM table WHERE "a" = b') + + def test_order_by_rendering(self): + pass + + def test_limit_rendering(self): + pass + + def test_allow_filtering_rendering(self): + pass From 136339d9d3ce1bdb180a2881bd1d686197a76bc8 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 19:10:46 -0700 Subject: [PATCH 0448/3726] moving where rendering into it's own property --- cqlengine/statements.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 96581599e3..6dc9a75ed9 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -39,6 +39,10 @@ def add_where_clause(self, clause): raise StatementException("only instances of WhereClause can be added to statements") self.where_clauses.append(clause) + @property + def _where(self): + return 'WHERE {}'.format(' AND '.join([unicode(c) for c in self.where_clauses)) + class SelectStatement(BaseCQLStatement): """ a cql select statement """ @@ -69,7 +73,7 @@ def __unicode__(self): qs += ['FROM', self.table] if self.where_clauses: - qs += ['WHERE', ' AND '.join([unicode(c) for c in 
self.where_clauses])] + qs += [self._where] if self.order_by: qs += ['ORDER BY {}'.format(', '.join(unicode(o) for o in self.order_by))] @@ -103,6 +107,7 @@ def __init__(self, table, values, consistency=None): consistency=consistency, where=None ) + self.values = values def add_where_clause(self, clause): raise StatementException("Cannot add where clauses to insert statements") From b23cb13ad8751cc34fcf51ef1a5be496ce8a71f2 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 21:21:47 -0700 Subject: [PATCH 0449/3726] fixing str tests --- cqlengine/statements.py | 5 ++++- .../tests/statements/test_select_statement.py | 14 +++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 6dc9a75ed9..ee289b44d0 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -39,9 +39,12 @@ def add_where_clause(self, clause): raise StatementException("only instances of WhereClause can be added to statements") self.where_clauses.append(clause) + def __str__(self): + return str(unicode(self)) + @property def _where(self): - return 'WHERE {}'.format(' AND '.join([unicode(c) for c in self.where_clauses)) + return 'WHERE {}'.format(' AND '.join([unicode(c) for c in self.where_clauses])) class SelectStatement(BaseCQLStatement): diff --git a/cqlengine/tests/statements/test_select_statement.py b/cqlengine/tests/statements/test_select_statement.py index bdaa3d8471..d69f77bad8 100644 --- a/cqlengine/tests/statements/test_select_statement.py +++ b/cqlengine/tests/statements/test_select_statement.py @@ -13,24 +13,24 @@ def test_single_field_is_listified(self): def test_field_rendering(self): """ tests that fields are properly added to the select statement """ ss = SelectStatement('table', ['f1', 'f2']) - self.assertTrue(unicode(ss).startswith('SELECT "f1", "f2"')) - self.assertTrue(str(ss).startswith('SELECT "f1", "f2"')) + self.assertTrue(unicode(ss).startswith('SELECT "f1", "f2"'), 
unicode(ss)) + self.assertTrue(str(ss).startswith('SELECT "f1", "f2"'), str(ss)) def test_none_fields_rendering(self): """ tests that a '*' is added if no fields are passed in """ ss = SelectStatement('table', None) - self.assertTrue(unicode(ss).startswith('SELECT *')) - self.assertTrue(str(ss).startswith('SELECT *')) + self.assertTrue(unicode(ss).startswith('SELECT *'), unicode(ss)) + self.assertTrue(str(ss).startswith('SELECT *'), str(ss)) def test_table_rendering(self): ss = SelectStatement('table', None) - self.assertTrue(unicode(ss).startswith('SELECT * FROM table')) - self.assertTrue(str(ss).startswith('SELECT * FROM table')) + self.assertTrue(unicode(ss).startswith('SELECT * FROM table'), unicode(ss)) + self.assertTrue(str(ss).startswith('SELECT * FROM table'), str(ss)) def test_where_clause_rendering(self): ss = SelectStatement('table', None) ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) - self.assertEqual(unicode(ss), 'SELECT * FROM table WHERE "a" = b') + self.assertEqual(unicode(ss), 'SELECT * FROM table WHERE "a" = b', unicode(ss)) def test_order_by_rendering(self): pass From 1207c5fea5e41ef5b92875b30b161223bf5fa3ad Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 21:23:15 -0700 Subject: [PATCH 0450/3726] removing the dev mailing list from the docs --- README.md | 2 -- docs/index.rst | 2 -- setup.py | 2 -- 3 files changed, 6 deletions(-) diff --git a/README.md b/README.md index a1242e34fc..15c591cb8b 100644 --- a/README.md +++ b/README.md @@ -11,8 +11,6 @@ cqlengine is a Cassandra CQL 3 Object Mapper for Python [Users Mailing List](https://groups.google.com/forum/?fromgroups#!forum/cqlengine-users) -[Dev Mailing List](https://groups.google.com/forum/?fromgroups#!forum/cqlengine-dev) - ## Installation ``` pip install cqlengine diff --git a/docs/index.rst b/docs/index.rst index e4e8c394fd..d748f39fd1 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -96,8 +96,6 @@ Getting Started `Users Mailing List `_ -`Dev Mailing 
List `_ - Indices and tables ================== diff --git a/setup.py b/setup.py index 3bb09c9c2c..2965a6d73a 100644 --- a/setup.py +++ b/setup.py @@ -14,8 +14,6 @@ [Report a Bug](https://github.com/bdeggleston/cqlengine/issues) [Users Mailing List](https://groups.google.com/forum/?fromgroups#!forum/cqlengine-users) - -[Dev Mailing List](https://groups.google.com/forum/?fromgroups#!forum/cqlengine-dev) """ setup( From 5733a05684cce03915dcbea7d157cb3560b49257 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 21:33:50 -0700 Subject: [PATCH 0451/3726] adding tests around misc select statement rendering --- .../tests/statements/test_select_statement.py | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/cqlengine/tests/statements/test_select_statement.py b/cqlengine/tests/statements/test_select_statement.py index d69f77bad8..ec7673cd29 100644 --- a/cqlengine/tests/statements/test_select_statement.py +++ b/cqlengine/tests/statements/test_select_statement.py @@ -32,11 +32,15 @@ def test_where_clause_rendering(self): ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) self.assertEqual(unicode(ss), 'SELECT * FROM table WHERE "a" = b', unicode(ss)) - def test_order_by_rendering(self): - pass - - def test_limit_rendering(self): - pass - - def test_allow_filtering_rendering(self): - pass + def test_additional_rendering(self): + ss = SelectStatement( + 'table', + None, + order_by=['x', 'y'], + limit=15, + allow_filtering=True + ) + qstr = unicode(ss) + self.assertIn('LIMIT 15', qstr) + self.assertIn('ORDER BY x, y', qstr) + self.assertIn('ALLOW FILTERING', qstr) From e59d75977f683ca135d2a02f9907cdfc0cb9b95c Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 21:34:51 -0700 Subject: [PATCH 0452/3726] fixing statement tests --- cqlengine/tests/statements/test_insert_statement.py | 2 +- cqlengine/tests/statements/test_where_clause.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff 
--git a/cqlengine/tests/statements/test_insert_statement.py b/cqlengine/tests/statements/test_insert_statement.py index 6b88ef343c..f81d8ea68d 100644 --- a/cqlengine/tests/statements/test_insert_statement.py +++ b/cqlengine/tests/statements/test_insert_statement.py @@ -6,6 +6,6 @@ class InsertStatementTests(TestCase): def test_where_clause_failure(self): """ tests that where clauses cannot be added to Insert statements """ - ist = InsertStatement('table') + ist = InsertStatement('table', {}) with self.assertRaises(StatementException): ist.add_where_clause('s') \ No newline at end of file diff --git a/cqlengine/tests/statements/test_where_clause.py b/cqlengine/tests/statements/test_where_clause.py index a455203642..353e506fd7 100644 --- a/cqlengine/tests/statements/test_where_clause.py +++ b/cqlengine/tests/statements/test_where_clause.py @@ -13,5 +13,5 @@ def test_operator_check(self): def test_where_clause_rendering(self): """ tests that where clauses are rendered properly """ wc = WhereClause('a', EqualsOperator(), 'c') - self.assertEqual("a = c", unicode(wc)) - self.assertEqual("a = c", str(wc)) + self.assertEqual('"a" = c', unicode(wc)) + self.assertEqual('"a" = c', str(wc)) From e6a411e3fc7e2920a62e0fd04e4e2b472879ecf7 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 21:42:20 -0700 Subject: [PATCH 0453/3726] adding a set clause --- cqlengine/statements.py | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index ee289b44d0..4535cc151c 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -1,19 +1,12 @@ -from cqlengine.operators import BaseWhereOperator +from cqlengine.operators import BaseWhereOperator, BaseAssignmentOperator class StatementException(Exception): pass -class WhereClause(object): - """ a single where statement used in queries """ +class BaseClause(object): def __init__(self, field, operator, value): - 
super(WhereClause, self).__init__() - if not isinstance(operator, BaseWhereOperator): - raise StatementException( - "operator must be of type {}, got {}".format(BaseWhereOperator, type(operator)) - ) - self.field = field self.operator = operator self.value = value @@ -25,6 +18,28 @@ def __str__(self): return str(unicode(self)) +class WhereClause(BaseClause): + """ a single where statement used in queries """ + + def __init__(self, field, operator, value): + if not isinstance(operator, BaseWhereOperator): + raise StatementException( + "operator must be of type {}, got {}".format(BaseWhereOperator, type(operator)) + ) + super(WhereClause, self).__init__(field, operator, value) + + +class SetClause(BaseClause): + """ a single variable st statement """ + + def __init__(self, field, operator, value): + if not isinstance(operator, BaseAssignmentOperator): + raise StatementException( + "operator must be of type {}, got {}".format(BaseAssignmentOperator, type(operator)) + ) + super(SetClause, self).__init__(field, operator, value) + + class BaseCQLStatement(object): """ The base cql statement class """ From 1d816dd07c5099898cf1be40523269adcd2332d4 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 21:50:45 -0700 Subject: [PATCH 0454/3726] adding assignment statement base class --- cqlengine/statements.py | 39 +++++++++++++++++++++++++++++++-------- 1 file changed, 31 insertions(+), 8 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 4535cc151c..d308d6dc72 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -29,7 +29,7 @@ def __init__(self, field, operator, value): super(WhereClause, self).__init__(field, operator, value) -class SetClause(BaseClause): +class AssignmentClause(BaseClause): """ a single variable st statement """ def __init__(self, field, operator, value): @@ -37,7 +37,7 @@ def __init__(self, field, operator, value): raise StatementException( "operator must be of type {}, got 
{}".format(BaseAssignmentOperator, type(operator)) ) - super(SetClause, self).__init__(field, operator, value) + super(AssignmentClause, self).__init__(field, operator, value) class BaseCQLStatement(object): @@ -72,8 +72,8 @@ def __init__(self, where=None, order_by=None, limit=None, - allow_filtering=False - ): + allow_filtering=False): + super(SelectStatement, self).__init__( table, consistency=consistency, @@ -106,7 +106,7 @@ def __unicode__(self): class DMLStatement(BaseCQLStatement): - """ mutation statements """ + """ mutation statements with ttls """ def __init__(self, table, consistency=None, where=None, ttl=None): super(DMLStatement, self).__init__( @@ -116,7 +116,30 @@ def __init__(self, table, consistency=None, where=None, ttl=None): self.ttl = ttl -class InsertStatement(DMLStatement): +class AssignmentStatement(DMLStatement): + """ value assignment statements """ + + def __init__(self, + table, + assignments, + consistency=None, + where=None, + ttl=None): + super(AssignmentStatement, self).__init__( + table, + consistency=consistency, + where=where, + ttl=ttl + ) + self.assignments = assignments or [] + + def add_assignment_clause(self, clause): + if not isinstance(clause, AssignmentClause): + raise StatementException("only instances of AssignmentClause can be added to statements") + self.assignments.append(clause) + + +class InsertStatement(AssignmentStatement): """ an cql insert select statement """ def __init__(self, table, values, consistency=None): @@ -131,12 +154,12 @@ def add_where_clause(self, clause): raise StatementException("Cannot add where clauses to insert statements") -class UpdateStatement(DMLStatement): +class UpdateStatement(AssignmentStatement): """ an cql update select statement """ def __init__(self, table, consistency=None, where=None): super(UpdateStatement, self).__init__(table, consistency, where) -class DeleteStatement(BaseCQLStatement): +class DeleteStatement(DMLStatement): """ a cql delete statement """ From 
822f6f3c2d47c76ec9e2b7c7eb195a85a0a45171 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 24 Oct 2013 21:58:02 -0700 Subject: [PATCH 0455/3726] setting up constructors for other statements --- cqlengine/statements.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index d308d6dc72..45f8a49b00 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -80,7 +80,7 @@ def __init__(self, where=where ) - self.fields = [fields] if isinstance(fields, basestring) else fields + self.fields = [fields] if isinstance(fields, basestring) else (fields or []) self.order_by = [order_by] if isinstance(order_by, basestring) else order_by self.limit = limit self.allow_filtering = allow_filtering @@ -157,9 +157,26 @@ def add_where_clause(self, clause): class UpdateStatement(AssignmentStatement): """ an cql update select statement """ - def __init__(self, table, consistency=None, where=None): - super(UpdateStatement, self).__init__(table, consistency, where) + def __init__(self, table, assignments, consistency=None, where=None, ttl=None): + super(UpdateStatement, self).__init__( + table, + assignments, + consistency=consistency, + where=where, + ttl=ttl + ) class DeleteStatement(DMLStatement): """ a cql delete statement """ + + def __init__(self, table, fields, consistency=None, where=None, ttl=None): + super(DeleteStatement, self).__init__( + table, + consistency=consistency, + where=where, + ttl=ttl + ) + self.fields = [fields] if isinstance(fields, basestring) else (fields or []) + + From 58253b4ed73945b8ad5d6b81b5f5fb541ab82665 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 07:06:40 -0700 Subject: [PATCH 0456/3726] adding deterministic context ids --- cqlengine/operators.py | 10 +++++++++- cqlengine/statements.py | 32 +++++++++++++++++++++++++++++--- 2 files changed, 38 insertions(+), 4 deletions(-) diff --git a/cqlengine/operators.py 
b/cqlengine/operators.py index c5ad4a5669..cbbfe4ad66 100644 --- a/cqlengine/operators.py +++ b/cqlengine/operators.py @@ -71,4 +71,12 @@ class LessThanOrEqualOperator(BaseWhereOperator): class BaseAssignmentOperator(BaseQueryOperator): - """ base operator used for insert and delete statements """ \ No newline at end of file + """ base operator used for insert and delete statements """ + + +class AssignmentOperator(BaseAssignmentOperator): + cql_symbol = "=" + + +class AddSymbol(BaseAssignmentOperator): + cql_symbol = "+" \ No newline at end of file diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 45f8a49b00..1a7593d7df 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -10,13 +10,27 @@ def __init__(self, field, operator, value): self.field = field self.operator = operator self.value = value + self.context_id = None def __unicode__(self): - return u'"{}" {} {}'.format(self.field, self.operator, self.value) + return u'"{}" {} {}'.format(self.field, self.operator, self.context_id) def __str__(self): return str(unicode(self)) + def get_context_size(self): + """ returns the number of entries this clause will add to the query context """ + return 1 + + def set_context_id(self, i): + """ sets the value placeholder that will be used in the query """ + self.context_id = i + + def update_context(self, ctx): + """ updates the query context with this clauses values """ + assert isinstance(ctx, dict) + ctx[self.context_id] = self.value + class WhereClause(BaseClause): """ a single where statement used in queries """ @@ -47,11 +61,17 @@ def __init__(self, table, consistency=None, where=None): super(BaseCQLStatement, self).__init__() self.table = table self.consistency = consistency - self.where_clauses = where or [] + self.context_counter = 0 + + self.where_clauses = [] + for clause in where or []: + self.add_where_clause(clause) def add_where_clause(self, clause): if not isinstance(clause, WhereClause): raise StatementException("only 
instances of WhereClause can be added to statements") + clause.set_context_id(self.context_counter) + self.context_counter += clause.get_context_size() self.where_clauses.append(clause) def __str__(self): @@ -131,11 +151,17 @@ def __init__(self, where=where, ttl=ttl ) - self.assignments = assignments or [] + + # add assignments + self.assignments = [] + for assignment in assignments or []: + self.add_assignment_clause(assignment) def add_assignment_clause(self, clause): if not isinstance(clause, AssignmentClause): raise StatementException("only instances of AssignmentClause can be added to statements") + clause.set_context_id(self.context_counter) + self.context_counter += clause.get_context_size() self.assignments.append(clause) From 317b68815583c9e795a1fbddfa41366c7921f789 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 07:10:36 -0700 Subject: [PATCH 0457/3726] adding test around adding assignments --- .../tests/statements/test_assignment_statement.py | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 cqlengine/tests/statements/test_assignment_statement.py diff --git a/cqlengine/tests/statements/test_assignment_statement.py b/cqlengine/tests/statements/test_assignment_statement.py new file mode 100644 index 0000000000..695c7cebc8 --- /dev/null +++ b/cqlengine/tests/statements/test_assignment_statement.py @@ -0,0 +1,11 @@ +from unittest import TestCase +from cqlengine.statements import AssignmentStatement, StatementException + + +class AssignmentStatementTest(TestCase): + + def test_add_assignment_type_checking(self): + """ tests that only assignment clauses can be added to queries """ + stmt = AssignmentStatement('table', []) + with self.assertRaises(StatementException): + stmt.add_assignment_clause('x=5') \ No newline at end of file From 6bfd7536acfde03594c53a115a44a45ecc2922e3 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 07:12:11 -0700 Subject: [PATCH 0458/3726] adding test around adding where 
clauses --- cqlengine/tests/statements/test_base_statement.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/cqlengine/tests/statements/test_base_statement.py b/cqlengine/tests/statements/test_base_statement.py index e69de29bb2..4acc1b926f 100644 --- a/cqlengine/tests/statements/test_base_statement.py +++ b/cqlengine/tests/statements/test_base_statement.py @@ -0,0 +1,11 @@ +from unittest import TestCase +from cqlengine.statements import BaseCQLStatement, StatementException + + +class BaseStatementTest(TestCase): + + def test_where_clause_type_checking(self): + """ tests that only assignment clauses can be added to queries """ + stmt = BaseCQLStatement('table', []) + with self.assertRaises(StatementException): + stmt.add_where_clause('x=5') From 66c625027c62f2ba3d5d6df3ceecb2bafc7beff4 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 07:19:51 -0700 Subject: [PATCH 0459/3726] adding initial unicode rendering for insert statement --- cqlengine/statements.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 1a7593d7df..cbdd55bec4 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -53,6 +53,9 @@ def __init__(self, field, operator, value): ) super(AssignmentClause, self).__init__(field, operator, value) + def insert_tuple(self): + return self.field, self.context_id + class BaseCQLStatement(object): """ The base cql statement class """ @@ -179,6 +182,19 @@ def __init__(self, table, values, consistency=None): def add_where_clause(self, clause): raise StatementException("Cannot add where clauses to insert statements") + def __unicode__(self): + qs = ['INSERT INTO {}'.format(self.table)] + + # get column names and context placeholders + fields = [a.insert_tuple() for a in self.assignments] + columns, values = zip(*fields) + + qs += ["({})".format(', '.join(['"{}"'.format(c) for c in columns]))] + qs += ['VALUES'] + qs += ["({})".format(', 
'.join([':{}'.format(v) for v in values]))] + + return ' '.join(qs) + class UpdateStatement(AssignmentStatement): """ an cql update select statement """ From edcb766893b6e93f417fabef2aca5598d4cf970c Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 07:27:31 -0700 Subject: [PATCH 0460/3726] adding test around clause context updating --- cqlengine/tests/statements/test_base_clause.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 cqlengine/tests/statements/test_base_clause.py diff --git a/cqlengine/tests/statements/test_base_clause.py b/cqlengine/tests/statements/test_base_clause.py new file mode 100644 index 0000000000..04d7d52845 --- /dev/null +++ b/cqlengine/tests/statements/test_base_clause.py @@ -0,0 +1,16 @@ +from unittest import TestCase +from cqlengine.statements import BaseClause + + +class BaseClauseTests(TestCase): + + def test_context_updating(self): + ss = BaseClause('a', 'b') + assert ss.get_context_size() == 1 + + ctx = {} + ss.set_context_id(10) + ss.update_context(ctx) + assert ctx == {10: 'b'} + + From 188fdd54aa20442beb74850a69df9c8aa2e5c91e Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 07:30:13 -0700 Subject: [PATCH 0461/3726] reworkign clause construction and test around assignment clauses --- cqlengine/statements.py | 19 ++++++------------- .../statements/test_assignment_clause.py | 13 +++++++++++++ 2 files changed, 19 insertions(+), 13 deletions(-) create mode 100644 cqlengine/tests/statements/test_assignment_clause.py diff --git a/cqlengine/statements.py b/cqlengine/statements.py index cbdd55bec4..3c49127cc7 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -6,15 +6,11 @@ class StatementException(Exception): pass class BaseClause(object): - def __init__(self, field, operator, value): + def __init__(self, field, value): self.field = field - self.operator = operator self.value = value self.context_id = None - def __unicode__(self): - return u'"{}" {} 
{}'.format(self.field, self.operator, self.context_id) - def __str__(self): return str(unicode(self)) @@ -40,19 +36,16 @@ def __init__(self, field, operator, value): raise StatementException( "operator must be of type {}, got {}".format(BaseWhereOperator, type(operator)) ) - super(WhereClause, self).__init__(field, operator, value) + super(WhereClause, self).__init__(field, value) + self.operator = operator + + def __unicode__(self): + return u'"{}" {} {}'.format(self.field, self.operator, self.context_id) class AssignmentClause(BaseClause): """ a single variable st statement """ - def __init__(self, field, operator, value): - if not isinstance(operator, BaseAssignmentOperator): - raise StatementException( - "operator must be of type {}, got {}".format(BaseAssignmentOperator, type(operator)) - ) - super(AssignmentClause, self).__init__(field, operator, value) - def insert_tuple(self): return self.field, self.context_id diff --git a/cqlengine/tests/statements/test_assignment_clause.py b/cqlengine/tests/statements/test_assignment_clause.py new file mode 100644 index 0000000000..b778d4b4a8 --- /dev/null +++ b/cqlengine/tests/statements/test_assignment_clause.py @@ -0,0 +1,13 @@ +from unittest import TestCase +from cqlengine.statements import AssignmentClause + + +class AssignmentClauseTests(TestCase): + + def test_rendering(self): + pass + + def test_insert_tuple(self): + ac = AssignmentClause('a', 'b') + ac.set_context_id(10) + self.assertEqual(ac.insert_tuple(), ('a', 10)) From 0a4eebfcd5447b91d55ed3b57dfa0b92dcaf294e Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 07:41:15 -0700 Subject: [PATCH 0462/3726] adding tests around insert statements and fixing a few things --- cqlengine/statements.py | 4 ++-- .../tests/statements/test_insert_statement.py | 19 ++++++++++++++++--- .../tests/statements/test_select_statement.py | 3 +++ 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 
3c49127cc7..70db919773 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -164,13 +164,13 @@ def add_assignment_clause(self, clause): class InsertStatement(AssignmentStatement): """ an cql insert select statement """ - def __init__(self, table, values, consistency=None): + def __init__(self, table, assignments, consistency=None): super(InsertStatement, self).__init__( table, + assignments, consistency=consistency, where=None ) - self.values = values def add_where_clause(self, clause): raise StatementException("Cannot add where clauses to insert statements") diff --git a/cqlengine/tests/statements/test_insert_statement.py b/cqlengine/tests/statements/test_insert_statement.py index f81d8ea68d..093e799923 100644 --- a/cqlengine/tests/statements/test_insert_statement.py +++ b/cqlengine/tests/statements/test_insert_statement.py @@ -1,11 +1,24 @@ from unittest import TestCase -from cqlengine.statements import InsertStatement, StatementException +from cqlengine.statements import InsertStatement, StatementException, AssignmentClause class InsertStatementTests(TestCase): def test_where_clause_failure(self): """ tests that where clauses cannot be added to Insert statements """ - ist = InsertStatement('table', {}) + ist = InsertStatement('table', None) with self.assertRaises(StatementException): - ist.add_where_clause('s') \ No newline at end of file + ist.add_where_clause('s') + + def test_statement(self): + ist = InsertStatement('table', None) + ist.add_assignment_clause(AssignmentClause('a', 'b')) + ist.add_assignment_clause(AssignmentClause('c', 'd')) + + self.assertEqual( + unicode(ist), + 'INSERT INTO table ("a", "c") VALUES (:0, :1)' + ) + + def test_additional_rendering(self): + self.fail("Implement ttl and consistency") diff --git a/cqlengine/tests/statements/test_select_statement.py b/cqlengine/tests/statements/test_select_statement.py index ec7673cd29..13ed3d5005 100644 --- a/cqlengine/tests/statements/test_select_statement.py +++ 
b/cqlengine/tests/statements/test_select_statement.py @@ -44,3 +44,6 @@ def test_additional_rendering(self): self.assertIn('LIMIT 15', qstr) self.assertIn('ORDER BY x, y', qstr) self.assertIn('ALLOW FILTERING', qstr) + + self.fail("Implement ttl and consistency") + From 9413595f10b04dd5d29aa90a5c62733f88c17b21 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 25 Oct 2013 10:57:53 -0700 Subject: [PATCH 0463/3726] fixed ttl bug on updates --- cqlengine/query.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 0e8d9ce1e0..a88c744be4 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -805,10 +805,12 @@ def update(self, **values): ctx[field_id] = val if set_statements: - qs = "UPDATE {} SET {} WHERE {}".format( + ttl_stmt = "USING TTL {}".format(self._ttl) if self._ttl else "" + qs = "UPDATE {} SET {} WHERE {} {}".format( self.column_family_name, ', '.join(set_statements), - self._where_clause() + self._where_clause(), + ttl_stmt ) ctx.update(self._where_values()) execute(qs, ctx, self._consistency) From 735b05f6ef7213c0f8e26e0959f6bfb0af0c5ff6 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 25 Oct 2013 11:53:08 -0700 Subject: [PATCH 0464/3726] clone instead of modify self --- cqlengine/query.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index a88c744be4..a542c303d2 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -307,7 +307,7 @@ def __call__(self, *args, **kwargs): def __deepcopy__(self, memo): clone = self.__class__(self.model) for k,v in self.__dict__.items(): - if k in ['_con', '_cur', '_result_cache', '_result_idx']: + if k in ['_con', '_cur', '_result_cache', '_result_idx']: # don't clone these clone.__dict__[k] = None elif k == '_batch': # we need to keep the same batch instance across @@ -766,12 +766,14 @@ def values_list(self, *fields, **kwargs): return clone def consistency(self, 
consistency): - self._consistency = consistency - return self + clone = copy.deepcopy(self) + clone._consistency = consistency + return clone def ttl(self, ttl): - self._ttl = ttl - return self + clone = copy.deepcopy(self) + clone._ttl = ttl + return clone def update(self, **values): """ Updates the rows in this queryset """ From de041cd08d5281b993f20981d14195e83900ed96 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 11:57:23 -0700 Subject: [PATCH 0465/3726] adding method for determining which column values have changed --- cqlengine/models.py | 4 ++++ cqlengine/tests/model/test_model_io.py | 12 +++++++++--- docs/topics/models.rst | 5 +++++ 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 93a9af984e..7012fc4471 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -409,6 +409,10 @@ def delete(self): """ Deletes this instance """ self.__dmlquery__(self.__class__, self, batch=self._batch).delete() + def get_changed_columns(self): + """ returns a list of the columns that have been updated since instantiation or save """ + return [k for k,v in self._values.items() if v.changed] + @classmethod def _class_batch(cls, batch): return cls.objects.batch(batch) diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index 08d41d8b27..91d4591246 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -42,8 +42,6 @@ def test_model_save_and_load(self): for cname in tm._columns.keys(): self.assertEquals(getattr(tm, cname), getattr(tm2, cname)) - - def test_model_updating_works_properly(self): """ Tests that subsequent saves after initial model creation work @@ -78,7 +76,6 @@ def test_column_deleting_works_properly(self): assert tm2.text is None assert tm2._values['text'].previous_value is None - def test_a_sensical_error_is_raised_if_you_try_to_create_a_table_twice(self): """ """ @@ -92,6 +89,7 @@ class 
TestMultiKeyModel(Model): count = columns.Integer(required=False) text = columns.Text(required=False) + class TestDeleting(BaseCassEngTestCase): @classmethod @@ -158,6 +156,14 @@ def test_deleting_only(self): assert check.count is None assert check.text is None + def test_get_changed_columns(self): + assert self.instance.get_changed_columns() == [] + self.instance.count = 1 + changes = self.instance.get_changed_columns() + assert len(changes) == 1 + assert changes == ['count'] + self.instance.save() + assert self.instance.get_changed_columns() == [] class TestCanUpdate(BaseCassEngTestCase): diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 42d447f9d7..3e776e5be6 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -137,6 +137,7 @@ Model Methods .. method:: delete() Deletes the object from the database. + -- method:: update(**values) @@ -145,6 +146,10 @@ Model Methods fields. If no fields on the model have been modified since loading, no query will be performed. Model validation is performed normally. 
+ -- method:: get_changed_columns() + + Returns a list of column names that have changed since the model was instantiated or saved + Model Attributes ================ From dfb062ec85c555c5b0bea2d490c0d61bcf7c93be Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 12:01:39 -0700 Subject: [PATCH 0466/3726] updating changelog --- changelog | 3 +++ 1 file changed, 3 insertions(+) diff --git a/changelog b/changelog index 57f6fc2c6d..4aad4980c0 100644 --- a/changelog +++ b/changelog @@ -2,9 +2,12 @@ CHANGELOG 0.9 * adding update method +* adding support for ttls +* adding support for per-query consistency * adding BigInt column (thanks @Lifto) * adding support for timezone aware time uuid functions (thanks @dokai) * only saving collection fields on insert if they've been modified +* adding model method that returns a list of modified columns 0.8.5 * adding support for timeouts From b0feecfed31c81d45a4f8b5b62669e304a2ba2cd Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 12:03:24 -0700 Subject: [PATCH 0467/3726] bumping version number --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 7ada0d303f..b63ba696b7 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.8.5 +0.9 From aa87e3a3a6686abfe2b0ffb11f366bc1c067c62f Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 25 Oct 2013 12:44:32 -0700 Subject: [PATCH 0468/3726] ignore docs build folder --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 1e4d8eb344..1b5006e6b8 100644 --- a/.gitignore +++ b/.gitignore @@ -42,3 +42,4 @@ html/ /commitlog /data +docs/_build From 6056f2fac1d4ec695949f19de6fdc44231533775 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 25 Oct 2013 13:14:55 -0700 Subject: [PATCH 0469/3726] docs fix --- docs/topics/queryset.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/topics/queryset.rst 
b/docs/topics/queryset.rst index 6caf910612..2311801219 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -362,7 +362,7 @@ QuerySet method reference Sets the ttl to run the query query with. Note that running a select query with a ttl value will raise an exception - -- method:: update(**values) + .. method:: update(**values) Performs an update on the row selected by the queryset. Include values to update in the update like so: From 03e869c10ad1c7398cc00f11fa3c80a965068793 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 25 Oct 2013 13:20:01 -0700 Subject: [PATCH 0470/3726] ttl docs --- docs/topics/queryset.rst | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index 2311801219..a73ed1d05f 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -321,6 +321,15 @@ QuerySet method reference Returns a queryset matching all rows + .. method:: batch(batch_object) + + Sets the batch object to run the query on. Note that running a select query with a batch object will raise an exception + + .. method:: consistency(consistency_setting) + + Sets the consistency level for the operation. Options may be imported from the top level :attr:`cqlengine` package. + + .. method:: count() Returns the number of matching rows in your QuerySet @@ -354,12 +363,12 @@ QuerySet method reference Enables the (usually) unwise practive of querying on a clustering key without also defining a partition key - .. method:: batch(batch_object) - - Sets the batch object to run the query on. Note that running a select query with a batch object will raise an exception .. method:: ttl(ttl_in_seconds) + :param ttl_in_seconds: time in seconds in which the saved values should expire + :type ttl_in_seconds: int + Sets the ttl to run the query query with. Note that running a select query with a ttl value will raise an exception .. 
method:: update(**values) From 1e6462366184120b05e0b07ffaef6307230c6a67 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 13:48:03 -0700 Subject: [PATCH 0471/3726] implementing delete statement and adding method to get statement context --- cqlengine/statements.py | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 70db919773..7278b8c434 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -70,6 +70,12 @@ def add_where_clause(self, clause): self.context_counter += clause.get_context_size() self.where_clauses.append(clause) + def get_context(self): + ctx = {} + for clause in self.where_clauses or []: + clause.update_context(ctx) + return ctx + def __str__(self): return str(unicode(self)) @@ -83,7 +89,7 @@ class SelectStatement(BaseCQLStatement): def __init__(self, table, - fields, + fields=None, consistency=None, where=None, order_by=None, @@ -121,18 +127,7 @@ def __unicode__(self): return ' '.join(qs) -class DMLStatement(BaseCQLStatement): - """ mutation statements with ttls """ - - def __init__(self, table, consistency=None, where=None, ttl=None): - super(DMLStatement, self).__init__( - table, - consistency=consistency, - where=where) - self.ttl = ttl - - -class AssignmentStatement(DMLStatement): +class AssignmentStatement(BaseCQLStatement): """ value assignment statements """ def __init__(self, @@ -145,8 +140,8 @@ def __init__(self, table, consistency=consistency, where=where, - ttl=ttl ) + self.ttl = ttl # add assignments self.assignments = [] @@ -202,16 +197,24 @@ def __init__(self, table, assignments, consistency=None, where=None, ttl=None): ) -class DeleteStatement(DMLStatement): +class DeleteStatement(BaseCQLStatement): """ a cql delete statement """ - def __init__(self, table, fields, consistency=None, where=None, ttl=None): + def __init__(self, table, fields=None, consistency=None, where=None): 
super(DeleteStatement, self).__init__( table, consistency=consistency, where=where, - ttl=ttl ) self.fields = [fields] if isinstance(fields, basestring) else (fields or []) + def __unicode__(self): + qs = ['DELETE'] + qs += [', '.join(['"{}"'.format(f) for f in self.fields]) if self.fields else '*'] + qs += ['FROM', self.table] + + if self.where_clauses: + qs += [self._where] + + return ' '.join(qs) From 7771ceabd72c862ac10f3df74836005f26bf3f38 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 13:48:31 -0700 Subject: [PATCH 0472/3726] adding test around delete statement --- .../tests/statements/test_delete_statement.py | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/cqlengine/tests/statements/test_delete_statement.py b/cqlengine/tests/statements/test_delete_statement.py index e69de29bb2..da24b410f7 100644 --- a/cqlengine/tests/statements/test_delete_statement.py +++ b/cqlengine/tests/statements/test_delete_statement.py @@ -0,0 +1,38 @@ +from unittest import TestCase +from cqlengine.statements import DeleteStatement, WhereClause +from cqlengine.operators import * + + +class DeleteStatementTests(TestCase): + + def test_single_field_is_listified(self): + """ tests that passing a string field into the constructor puts it into a list """ + ds = DeleteStatement('table', 'field') + self.assertEqual(ds.fields, ['field']) + + def test_field_rendering(self): + """ tests that fields are properly added to the select statement """ + ds = DeleteStatement('table', ['f1', 'f2']) + self.assertTrue(unicode(ds).startswith('DELETE "f1", "f2"'), unicode(ds)) + self.assertTrue(str(ds).startswith('DELETE "f1", "f2"'), str(ds)) + + def test_none_fields_rendering(self): + """ tests that a '*' is added if no fields are passed in """ + ds = DeleteStatement('table', None) + self.assertTrue(unicode(ds).startswith('DELETE *'), unicode(ds)) + self.assertTrue(str(ds).startswith('DELETE *'), str(ds)) + + def test_table_rendering(self): + ds = 
DeleteStatement('table', None) + self.assertTrue(unicode(ds).startswith('DELETE * FROM table'), unicode(ds)) + self.assertTrue(str(ds).startswith('DELETE * FROM table'), str(ds)) + + def test_where_clause_rendering(self): + ds = DeleteStatement('table', None) + ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) + self.assertEqual(unicode(ds), 'DELETE * FROM table WHERE "a" = 0', unicode(ds)) + + def test_context(self): + ds = DeleteStatement('table', None) + ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) + self.assertEqual(ds.get_context(), {0: 'b'}) From ff1184080ceacfab6b9b18a975df8a3ee0d8dca5 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 13:48:38 -0700 Subject: [PATCH 0473/3726] adding context test to select statement --- cqlengine/tests/statements/test_select_statement.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cqlengine/tests/statements/test_select_statement.py b/cqlengine/tests/statements/test_select_statement.py index 13ed3d5005..25563c795d 100644 --- a/cqlengine/tests/statements/test_select_statement.py +++ b/cqlengine/tests/statements/test_select_statement.py @@ -32,6 +32,11 @@ def test_where_clause_rendering(self): ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) self.assertEqual(unicode(ss), 'SELECT * FROM table WHERE "a" = b', unicode(ss)) + def test_context(self): + ss = SelectStatement('table', None) + ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) + self.assertEqual(ss.get_context(), {0: 'b'}) + def test_additional_rendering(self): ss = SelectStatement( 'table', From 2b7dd6f22011a45f8d5dec3a6fd6e6f516a608ee Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 13:54:20 -0700 Subject: [PATCH 0474/3726] adding methods to the base assignment class --- cqlengine/statements.py | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 7278b8c434..387b61d714 100644 --- 
a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -64,6 +64,11 @@ def __init__(self, table, consistency=None, where=None): self.add_where_clause(clause) def add_where_clause(self, clause): + """ + adds a where clause to this statement + :param clause: the clause to add + :type clause: WhereClause + """ if not isinstance(clause, WhereClause): raise StatementException("only instances of WhereClause can be added to statements") clause.set_context_id(self.context_counter) @@ -71,6 +76,10 @@ def add_where_clause(self, clause): self.where_clauses.append(clause) def get_context(self): + """ + returns the context dict for this statement + :rtype: dict + """ ctx = {} for clause in self.where_clauses or []: clause.update_context(ctx) @@ -132,7 +141,7 @@ class AssignmentStatement(BaseCQLStatement): def __init__(self, table, - assignments, + assignments=None, consistency=None, where=None, ttl=None): @@ -149,12 +158,23 @@ def __init__(self, self.add_assignment_clause(assignment) def add_assignment_clause(self, clause): + """ + adds an assignment clause to this statement + :param clause: the clause to add + :type clause: AssignmentClause + """ if not isinstance(clause, AssignmentClause): raise StatementException("only instances of AssignmentClause can be added to statements") clause.set_context_id(self.context_counter) self.context_counter += clause.get_context_size() self.assignments.append(clause) + def get_context(self): + ctx = super(AssignmentStatement, self).get_context() + for clause in self.assignments: + clause.update_context(ctx) + return ctx + class InsertStatement(AssignmentStatement): """ an cql insert select statement """ From cc1533f099fab5d1ddebfa8cb40cd4951b56bf41 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 14:16:44 -0700 Subject: [PATCH 0475/3726] implementing update statement and insert ttl --- cqlengine/statements.py | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git 
a/cqlengine/statements.py b/cqlengine/statements.py index 387b61d714..36ef3bb4c9 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -179,14 +179,6 @@ def get_context(self): class InsertStatement(AssignmentStatement): """ an cql insert select statement """ - def __init__(self, table, assignments, consistency=None): - super(InsertStatement, self).__init__( - table, - assignments, - consistency=consistency, - where=None - ) - def add_where_clause(self, clause): raise StatementException("Cannot add where clauses to insert statements") @@ -201,20 +193,27 @@ def __unicode__(self): qs += ['VALUES'] qs += ["({})".format(', '.join([':{}'.format(v) for v in values]))] + if self.ttl: + qs += ["USING TTL {}".format(self.ttl)] + return ' '.join(qs) class UpdateStatement(AssignmentStatement): """ an cql update select statement """ - def __init__(self, table, assignments, consistency=None, where=None, ttl=None): - super(UpdateStatement, self).__init__( - table, - assignments, - consistency=consistency, - where=where, - ttl=ttl - ) + def __unicode__(self): + qs = ['UPDATE', self.table] + qs += 'SET' + qs += ', '.join([unicode(c) for c in self.assignments]) + + if self.where_clauses: + qs += [self._where] + + if self.ttl: + qs += ["USING TTL {}".format(self.ttl)] + + return ' '.join(qs) class DeleteStatement(BaseCQLStatement): From 70e446631b903a835994f2715bc8c583e991acd1 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 14:16:55 -0700 Subject: [PATCH 0476/3726] adding test around insert ttl --- cqlengine/tests/statements/test_insert_statement.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/statements/test_insert_statement.py b/cqlengine/tests/statements/test_insert_statement.py index 093e799923..51280a314f 100644 --- a/cqlengine/tests/statements/test_insert_statement.py +++ b/cqlengine/tests/statements/test_insert_statement.py @@ -21,4 +21,7 @@ def test_statement(self): ) def 
test_additional_rendering(self): - self.fail("Implement ttl and consistency") + ist = InsertStatement('table', ttl=60) + ist.add_assignment_clause(AssignmentClause('a', 'b')) + ist.add_assignment_clause(AssignmentClause('c', 'd')) + self.assertIn('USING TTL 60', unicode(ist)) From 89c20ef6c5e1d7717144c953583f1f2359321641 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 14:25:30 -0700 Subject: [PATCH 0477/3726] adding tests around update and related debugging --- cqlengine/statements.py | 15 ++++++++--- .../tests/statements/test_update_statement.py | 26 +++++++++++++++++++ 2 files changed, 38 insertions(+), 3 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 36ef3bb4c9..110fc94dc2 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -11,6 +11,9 @@ def __init__(self, field, value): self.value = value self.context_id = None + def __unicode__(self): + raise NotImplementedError + def __str__(self): return str(unicode(self)) @@ -40,12 +43,15 @@ def __init__(self, field, operator, value): self.operator = operator def __unicode__(self): - return u'"{}" {} {}'.format(self.field, self.operator, self.context_id) + return u'"{}" {} :{}'.format(self.field, self.operator, self.context_id) class AssignmentClause(BaseClause): """ a single variable st statement """ + def __unicode__(self): + return u'"{}" = :{}'.format(self.field, self.context_id) + def insert_tuple(self): return self.field, self.context_id @@ -85,6 +91,9 @@ def get_context(self): clause.update_context(ctx) return ctx + def __unicode__(self): + raise NotImplementedError + def __str__(self): return str(unicode(self)) @@ -204,8 +213,8 @@ class UpdateStatement(AssignmentStatement): def __unicode__(self): qs = ['UPDATE', self.table] - qs += 'SET' - qs += ', '.join([unicode(c) for c in self.assignments]) + qs += ['SET'] + qs += [', '.join([unicode(c) for c in self.assignments])] if self.where_clauses: qs += [self._where] diff --git 
a/cqlengine/tests/statements/test_update_statement.py b/cqlengine/tests/statements/test_update_statement.py index e69de29bb2..1dabbfe3c7 100644 --- a/cqlengine/tests/statements/test_update_statement.py +++ b/cqlengine/tests/statements/test_update_statement.py @@ -0,0 +1,26 @@ +from unittest import TestCase +from cqlengine.statements import UpdateStatement, WhereClause, AssignmentClause +from cqlengine.operators import * + + +class UpdateStatementTests(TestCase): + + def test_table_rendering(self): + """ tests that fields are properly added to the select statement """ + us = UpdateStatement('table') + self.assertTrue(unicode(us).startswith('UPDATE table SET'), unicode(us)) + self.assertTrue(str(us).startswith('UPDATE table SET'), str(us)) + + def test_rendering(self): + us = UpdateStatement('table') + us.add_assignment_clause(AssignmentClause('a', 'b')) + us.add_assignment_clause(AssignmentClause('c', 'd')) + us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) + self.assertEqual(unicode(us), 'UPDATE table SET "a" = :0, "c" = :1 WHERE "a" = :2', unicode(us)) + + def test_context(self): + us = UpdateStatement('table') + us.add_assignment_clause(AssignmentClause('a', 'b')) + us.add_assignment_clause(AssignmentClause('c', 'd')) + us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) + self.assertEqual(us.get_context(), {0: 'b', 1: 'd', 2: 'x'}) From a7c250561ff8acd10c96d523fa472972e3105da8 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 14:37:15 -0700 Subject: [PATCH 0478/3726] adding test around update statement ttls --- cqlengine/models.py | 1 + cqlengine/tests/statements/test_update_statement.py | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/cqlengine/models.py b/cqlengine/models.py index 21f78c2638..b6da73c8ba 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -69,6 +69,7 @@ def __call__(self, *args, **kwargs): """ raise NotImplementedError + class TTLDescriptor(object): """ returns a query set descriptor 
diff --git a/cqlengine/tests/statements/test_update_statement.py b/cqlengine/tests/statements/test_update_statement.py index 1dabbfe3c7..6c633852d1 100644 --- a/cqlengine/tests/statements/test_update_statement.py +++ b/cqlengine/tests/statements/test_update_statement.py @@ -24,3 +24,10 @@ def test_context(self): us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertEqual(us.get_context(), {0: 'b', 1: 'd', 2: 'x'}) + + def test_additional_rendering(self): + us = UpdateStatement('table', ttl=60) + us.add_assignment_clause(AssignmentClause('a', 'b')) + us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) + self.assertIn('USING TTL 60', unicode(us)) + From 1ea0f057fac7b398959dcabf9d551c0d1765c51a Mon Sep 17 00:00:00 2001 From: Dvir Volk Date: Sat, 26 Oct 2013 00:50:18 +0300 Subject: [PATCH 0479/3726] Added __repr__ to model instances for prettly logging --- cqlengine/models.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/cqlengine/models.py b/cqlengine/models.py index 21f78c2638..be48ca20de 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -247,6 +247,17 @@ def __init__(self, **values): self._is_persisted = False self._batch = None + + def __repr__(self): + """ + Pretty printing of models by their primary key + """ + return '{} <{}>'.format(self.__class__.__name__, + ', '.join(('{}={}'.format(k, getattr(self, k)) for k,v in self._primary_keys.iteritems())) + ) + + + @classmethod def _discover_polymorphic_submodels(cls): if not cls._is_polymorphic_base: From 09d440052963b70fd4c4232551d3dbbeed773bd3 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 15:00:05 -0700 Subject: [PATCH 0480/3726] removing dml statement tests --- cqlengine/tests/statements/test_dml_statement.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 cqlengine/tests/statements/test_dml_statement.py diff --git 
a/cqlengine/tests/statements/test_dml_statement.py b/cqlengine/tests/statements/test_dml_statement.py deleted file mode 100644 index e69de29bb2..0000000000 From 1bc88c547c23ede7a5ac3808ee4840944fa77f62 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 25 Oct 2013 15:03:27 -0700 Subject: [PATCH 0481/3726] version bump --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index b63ba696b7..f374f6662e 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.9 +0.9.1 From 7b24b93a53e3f67e0df0a426b668d0c51bdd246b Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 15:05:48 -0700 Subject: [PATCH 0482/3726] adding count and fixing select tests --- cqlengine/statements.py | 7 ++++++- .../tests/statements/test_select_statement.py | 17 ++++++++++------- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 110fc94dc2..7a172bdc16 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -108,6 +108,7 @@ class SelectStatement(BaseCQLStatement): def __init__(self, table, fields=None, + count=False, consistency=None, where=None, order_by=None, @@ -121,13 +122,17 @@ def __init__(self, ) self.fields = [fields] if isinstance(fields, basestring) else (fields or []) + self.count = count self.order_by = [order_by] if isinstance(order_by, basestring) else order_by self.limit = limit self.allow_filtering = allow_filtering def __unicode__(self): qs = ['SELECT'] - qs += [', '.join(['"{}"'.format(f) for f in self.fields]) if self.fields else '*'] + if self.count: + qs += ['COUNT(*)'] + else: + qs += [', '.join(['"{}"'.format(f) for f in self.fields]) if self.fields else '*'] qs += ['FROM', self.table] if self.where_clauses: diff --git a/cqlengine/tests/statements/test_select_statement.py b/cqlengine/tests/statements/test_select_statement.py index 25563c795d..4150ef3b24 100644 --- 
a/cqlengine/tests/statements/test_select_statement.py +++ b/cqlengine/tests/statements/test_select_statement.py @@ -18,22 +18,27 @@ def test_field_rendering(self): def test_none_fields_rendering(self): """ tests that a '*' is added if no fields are passed in """ - ss = SelectStatement('table', None) + ss = SelectStatement('table') self.assertTrue(unicode(ss).startswith('SELECT *'), unicode(ss)) self.assertTrue(str(ss).startswith('SELECT *'), str(ss)) def test_table_rendering(self): - ss = SelectStatement('table', None) + ss = SelectStatement('table') self.assertTrue(unicode(ss).startswith('SELECT * FROM table'), unicode(ss)) self.assertTrue(str(ss).startswith('SELECT * FROM table'), str(ss)) def test_where_clause_rendering(self): - ss = SelectStatement('table', None) + ss = SelectStatement('table') ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) - self.assertEqual(unicode(ss), 'SELECT * FROM table WHERE "a" = b', unicode(ss)) + self.assertEqual(unicode(ss), 'SELECT * FROM table WHERE "a" = :0', unicode(ss)) + + def test_count(self): + ss = SelectStatement('table', count=True) + ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) + self.assertEqual(unicode(ss), 'SELECT COUNT(*) FROM table WHERE "a" = :0', unicode(ss)) def test_context(self): - ss = SelectStatement('table', None) + ss = SelectStatement('table') ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) self.assertEqual(ss.get_context(), {0: 'b'}) @@ -50,5 +55,3 @@ def test_additional_rendering(self): self.assertIn('ORDER BY x, y', qstr) self.assertIn('ALLOW FILTERING', qstr) - self.fail("Implement ttl and consistency") - From 80eba6678937a617ad62b3adf936edbf964a7998 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 15:06:35 -0700 Subject: [PATCH 0483/3726] adding context placeholders to some where tests --- cqlengine/tests/statements/test_delete_statement.py | 2 +- cqlengine/tests/statements/test_where_clause.py | 5 +++-- 2 files changed, 4 insertions(+), 3 
deletions(-) diff --git a/cqlengine/tests/statements/test_delete_statement.py b/cqlengine/tests/statements/test_delete_statement.py index da24b410f7..58d05c867e 100644 --- a/cqlengine/tests/statements/test_delete_statement.py +++ b/cqlengine/tests/statements/test_delete_statement.py @@ -30,7 +30,7 @@ def test_table_rendering(self): def test_where_clause_rendering(self): ds = DeleteStatement('table', None) ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) - self.assertEqual(unicode(ds), 'DELETE * FROM table WHERE "a" = 0', unicode(ds)) + self.assertEqual(unicode(ds), 'DELETE * FROM table WHERE "a" = :0', unicode(ds)) def test_context(self): ds = DeleteStatement('table', None) diff --git a/cqlengine/tests/statements/test_where_clause.py b/cqlengine/tests/statements/test_where_clause.py index 353e506fd7..66defeadeb 100644 --- a/cqlengine/tests/statements/test_where_clause.py +++ b/cqlengine/tests/statements/test_where_clause.py @@ -13,5 +13,6 @@ def test_operator_check(self): def test_where_clause_rendering(self): """ tests that where clauses are rendered properly """ wc = WhereClause('a', EqualsOperator(), 'c') - self.assertEqual('"a" = c', unicode(wc)) - self.assertEqual('"a" = c', str(wc)) + wc.set_context_id(5) + self.assertEqual('"a" = :5', unicode(wc)) + self.assertEqual('"a" = :5', str(wc)) From a3df87c558bfc6164544e0e747c91c953ea64265 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 15:59:14 -0700 Subject: [PATCH 0484/3726] replacing select query with statement object --- cqlengine/query.py | 64 +++++++++++++++++++++------------------------- 1 file changed, 29 insertions(+), 35 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index a542c303d2..2e0ac06b6d 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -12,6 +12,8 @@ from cqlengine.exceptions import CQLEngineException, ValidationError from cqlengine.functions import QueryValue, Token +from cqlengine import statements, operators + #CQL 3 reference: 
#http://www.datastax.com/docs/1.1/references/cql/index @@ -326,41 +328,22 @@ def __len__(self): #----query generation / execution---- - def _where_clause(self): - """ Returns a where clause based on the given filter args """ - return ' AND '.join([f.cql for f in self._where]) - - def _where_values(self): - """ Returns the value dict to be passed to the cql query """ - values = {} - for where in self._where: - values.update(where.get_dict()) - return values - - def _get_select_statement(self): - """ returns the select portion of this queryset's cql statement """ - raise NotImplementedError + def _select_fields(self): + """ returns the fields to select """ + return [] def _select_query(self): """ Returns a select clause based on the given filter args """ - qs = [self._get_select_statement()] - qs += ['FROM {}'.format(self.column_family_name)] - - if self._where: - qs += ['WHERE {}'.format(self._where_clause())] - - if self._order: - qs += ['ORDER BY {}'.format(', '.join(self._order))] - - if self._limit: - qs += ['LIMIT {}'.format(self._limit)] - - if self._allow_filtering: - qs += ['ALLOW FILTERING'] - - return ' '.join(qs) + return statements.SelectStatement( + self.column_family_name, + fields=self._select_fields(), + where=self._where, + order_by=self._order, + limit=self._limit, + allow_filtering=self._allow_filtering + ) #----Reads------ @@ -368,7 +351,8 @@ def _execute_query(self): if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") if self._result_cache is None: - columns, self._result_cache = execute(self._select_query(), self._where_values(), self._consistency) + query = self._select_query() + columns, self._result_cache = execute(unicode(query).encode('utf-8'), query.get_context(), self._consistency) self._construct_result = self._get_result_constructor(columns) def _fill_result_cache_to_idx(self, idx): @@ -477,7 +461,7 @@ def filter(self, *args, **kwargs): #add arguments to the where clause filters 
clone = copy.deepcopy(self) for operator in args: - if not isinstance(operator, QueryOperator): + if not isinstance(operator, statements.WhereClause): raise QueryException('{} is not a valid query operator'.format(operator)) clone._where.append(operator) @@ -493,10 +477,10 @@ def filter(self, *args, **kwargs): raise QueryException("Can't resolve column name: '{}'".format(col_name)) #get query operator, or use equals if not supplied - operator_class = QueryOperator.get_operator(col_op or 'EQ') - operator = operator_class(column, val) + operator_class = operators.BaseWhereOperator.get_operator(col_op or 'EQ') + operator = operator_class() - clone._where.append(operator) + clone._where.append(statements.WhereClause(col_name, operator, val)) return clone @@ -717,6 +701,16 @@ def _get_select_statement(self): else: return 'SELECT *' + def _select_fields(self): + if self._defer_fields or self._only_fields: + fields = self.model._columns.keys() + if self._defer_fields: + fields = [f for f in fields if f not in self._defer_fields] + elif self._only_fields: + fields = self._only_fields + return [self.model._columns[f].db_field_name for f in fields] + return super(ModelQuerySet, self)._select_fields() + def _get_result_constructor(self, names): """ Returns a function that will be used to instantiate query results """ if not self._values_list: From 181991233389aed18e867d46c616fbc53a4c86ee Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 16:01:12 -0700 Subject: [PATCH 0485/3726] fixing operator query objects --- cqlengine/query.py | 12 ++++++------ cqlengine/tests/query/test_queryset.py | 16 ++++++++++++---- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 2e0ac06b6d..bb2f0e82a7 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -181,22 +181,22 @@ def in_(self, item): used in where you'd typically want to use python's `in` operator """ - return InOperator(self._get_column(), item) + 
return statements.WhereClause(self._get_column(), operators.InOperator(), item) def __eq__(self, other): - return EqualsOperator(self._get_column(), other) + return statements.WhereClause(self._get_column(), operators.EqualsOperator(), other) def __gt__(self, other): - return GreaterThanOperator(self._get_column(), other) + return statements.WhereClause(self._get_column(), operators.GreaterThanOperator(), other) def __ge__(self, other): - return GreaterThanOrEqualOperator(self._get_column(), other) + return statements.WhereClause(self._get_column(), operators.GreaterThanOrEqualOperator(), other) def __lt__(self, other): - return LessThanOperator(self._get_column(), other) + return statements.WhereClause(self._get_column(), operators.LessThanOperator(), other) def __le__(self, other): - return LessThanOrEqualOperator(self._get_column(), other) + return statements.WhereClause(self._get_column(), operators.LessThanOrEqualOperator(), other) class BatchType(object): diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 4fe258178a..2ae18a07f7 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -14,6 +14,9 @@ from datetime import timedelta from datetime import tzinfo +from cqlengine import statements +from cqlengine import operators + class TzOffset(tzinfo): """Minimal implementation of a timezone offset to help testing with timezone @@ -61,14 +64,17 @@ def test_query_filter_parsing(self): assert len(query1._where) == 1 op = query1._where[0] - assert isinstance(op, query.EqualsOperator) + + assert isinstance(op, statements.WhereClause) + assert isinstance(op.operator, operators.EqualsOperator) assert op.value == 5 query2 = query1.filter(expected_result__gte=1) assert len(query2._where) == 2 op = query2._where[1] - assert isinstance(op, query.GreaterThanOrEqualOperator) + self.assertIsInstance(op, statements.WhereClause) + self.assertIsInstance(op.operator, 
operators.GreaterThanOrEqualOperator) assert op.value == 1 def test_query_expression_parsing(self): @@ -77,14 +83,16 @@ def test_query_expression_parsing(self): assert len(query1._where) == 1 op = query1._where[0] - assert isinstance(op, query.EqualsOperator) + assert isinstance(op, statements.WhereClause) + assert isinstance(op.operator, operators.EqualsOperator) assert op.value == 5 query2 = query1.filter(TestModel.expected_result >= 1) assert len(query2._where) == 2 op = query2._where[1] - assert isinstance(op, query.GreaterThanOrEqualOperator) + self.assertIsInstance(op, statements.WhereClause) + self.assertIsInstance(op.operator, operators.GreaterThanOrEqualOperator) assert op.value == 1 def test_using_invalid_column_names_in_filter_kwargs_raises_error(self): From 74ed4f4d001e6a1e31bb49e8e479f826c4adc45e Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 16:02:00 -0700 Subject: [PATCH 0486/3726] removing obsolete tests --- cqlengine/tests/query/test_queryset.py | 33 -------------------------- 1 file changed, 33 deletions(-) diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 2ae18a07f7..8017c1578a 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -116,39 +116,6 @@ def test_using_non_query_operators_in_query_args_raises_error(self): with self.assertRaises(query.QueryException): TestModel.objects(5) - def test_filter_method_where_clause_generation(self): - """ - Tests the where clause creation - """ - query1 = TestModel.objects(test_id=5) - ids = [o.query_value.identifier for o in query1._where] - where = query1._where_clause() - assert where == '"test_id" = :{}'.format(*ids) - - query2 = query1.filter(expected_result__gte=1) - ids = [o.query_value.identifier for o in query2._where] - where = query2._where_clause() - assert where == '"test_id" = :{} AND "expected_result" >= :{}'.format(*ids) - - def 
test_query_expression_where_clause_generation(self): - """ - Tests the where clause creation - """ - query1 = TestModel.objects(TestModel.test_id == 5) - ids = [o.query_value.identifier for o in query1._where] - where = query1._where_clause() - assert where == '"test_id" = :{}'.format(*ids) - - query2 = query1.filter(TestModel.expected_result >= 1) - ids = [o.query_value.identifier for o in query2._where] - where = query2._where_clause() - assert where == '"test_id" = :{} AND "expected_result" >= :{}'.format(*ids) - - def test_querystring_generation(self): - """ - Tests the select querystring creation - """ - def test_queryset_is_immutable(self): """ Tests that calling a queryset function that changes it's state returns a new queryset From 181002486366eb00277a723daa9446f416d1025d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 17:16:43 -0700 Subject: [PATCH 0487/3726] getting basic select queries working properly --- cqlengine/models.py | 4 ++++ cqlengine/named.py | 4 ++++ cqlengine/query.py | 18 ++++++++++++------ cqlengine/statements.py | 2 +- 4 files changed, 21 insertions(+), 7 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index b6da73c8ba..19a05f73a7 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -129,7 +129,11 @@ class ColumnQueryEvaluator(AbstractQueryableColumn): def __init__(self, column): self.column = column + def __unicode__(self): + return self.column.db_field_name + def _get_column(self): + """ :rtype: ColumnQueryEvaluator """ return self.column diff --git a/cqlengine/named.py b/cqlengine/named.py index c1d1263a82..2b75443144 100644 --- a/cqlengine/named.py +++ b/cqlengine/named.py @@ -33,7 +33,11 @@ class NamedColumn(AbstractQueryableColumn): def __init__(self, name): self.name = name + def __unicode__(self): + return self.name + def _get_column(self): + """ :rtype: NamedColumn """ return self @property diff --git a/cqlengine/query.py b/cqlengine/query.py index bb2f0e82a7..80201a5ac1 100644 
--- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -175,28 +175,34 @@ class AbstractQueryableColumn(object): def _get_column(self): raise NotImplementedError + def __unicode__(self): + raise NotImplementedError + + def __str__(self): + return str(unicode(self)) + def in_(self, item): """ Returns an in operator used in where you'd typically want to use python's `in` operator """ - return statements.WhereClause(self._get_column(), operators.InOperator(), item) + return statements.WhereClause(unicode(self), operators.InOperator(), item) def __eq__(self, other): - return statements.WhereClause(self._get_column(), operators.EqualsOperator(), other) + return statements.WhereClause(unicode(self), operators.EqualsOperator(), other) def __gt__(self, other): - return statements.WhereClause(self._get_column(), operators.GreaterThanOperator(), other) + return statements.WhereClause(unicode(self), operators.GreaterThanOperator(), other) def __ge__(self, other): - return statements.WhereClause(self._get_column(), operators.GreaterThanOrEqualOperator(), other) + return statements.WhereClause(unicode(self), operators.GreaterThanOrEqualOperator(), other) def __lt__(self, other): - return statements.WhereClause(self._get_column(), operators.LessThanOperator(), other) + return statements.WhereClause(unicode(self), operators.LessThanOperator(), other) def __le__(self, other): - return statements.WhereClause(self._get_column(), operators.LessThanOrEqualOperator(), other) + return statements.WhereClause(unicode(self), operators.LessThanOrEqualOperator(), other) class BatchType(object): diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 7a172bdc16..9aa6e7febf 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -28,7 +28,7 @@ def set_context_id(self, i): def update_context(self, ctx): """ updates the query context with this clauses values """ assert isinstance(ctx, dict) - ctx[self.context_id] = self.value + ctx[str(self.context_id)] = self.value 
class WhereClause(BaseClause): From 49e58cf9673642c1dc24ddd9c602cebf1a7c4489 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 25 Oct 2013 17:16:53 -0700 Subject: [PATCH 0488/3726] ensure last item is removed properly --- cqlengine/tests/columns/test_container_columns.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 0d3dc524b2..9fbef0d805 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -51,6 +51,17 @@ def test_empty_set_initial(self): m.int_set.add(5) m.save() + def test_deleting_last_item_should_succeed(self): + m = TestSetModel.create() + m.int_set.add(5) + m.save() + m.int_set.remove(5) + m.save() + + m = TestSetModel.get(partition=m.partition) + self.assertNotIn(5, m.int_set) + + def test_empty_set_retrieval(self): m = TestSetModel.create() m2 = TestSetModel.get(partition=m.partition) From 647ebaf5f078508f6e7682420b23b9d24c009a22 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 17:20:50 -0700 Subject: [PATCH 0489/3726] modifying statement, clause, and operator stringifying to encode the object's unicode --- cqlengine/operators.py | 2 +- cqlengine/statements.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cqlengine/operators.py b/cqlengine/operators.py index cbbfe4ad66..4fef0a6b5d 100644 --- a/cqlengine/operators.py +++ b/cqlengine/operators.py @@ -15,7 +15,7 @@ def __unicode__(self): return self.cql_symbol def __str__(self): - return str(unicode(self)) + return unicode(self).encode('utf-8') @classmethod def get_operator(cls, symbol): diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 9aa6e7febf..45124c76b4 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -15,7 +15,7 @@ def __unicode__(self): raise NotImplementedError def __str__(self): - return str(unicode(self)) + return 
unicode(self).encode('utf-8') def get_context_size(self): """ returns the number of entries this clause will add to the query context """ @@ -95,7 +95,7 @@ def __unicode__(self): raise NotImplementedError def __str__(self): - return str(unicode(self)) + return unicode(self).encode('utf-8') @property def _where(self): From ec2337858c011799d796bb7405af548828d3a0a4 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 25 Oct 2013 17:23:51 -0700 Subject: [PATCH 0490/3726] excluding order and limit from count queries --- cqlengine/statements.py | 4 ++-- cqlengine/tests/statements/test_select_statement.py | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 45124c76b4..55dfa27757 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -138,10 +138,10 @@ def __unicode__(self): if self.where_clauses: qs += [self._where] - if self.order_by: + if self.order_by and not self.count: qs += ['ORDER BY {}'.format(', '.join(unicode(o) for o in self.order_by))] - if self.limit: + if self.limit and not self.count: qs += ['LIMIT {}'.format(self.limit)] if self.allow_filtering: diff --git a/cqlengine/tests/statements/test_select_statement.py b/cqlengine/tests/statements/test_select_statement.py index 4150ef3b24..495258c2fd 100644 --- a/cqlengine/tests/statements/test_select_statement.py +++ b/cqlengine/tests/statements/test_select_statement.py @@ -33,9 +33,11 @@ def test_where_clause_rendering(self): self.assertEqual(unicode(ss), 'SELECT * FROM table WHERE "a" = :0', unicode(ss)) def test_count(self): - ss = SelectStatement('table', count=True) + ss = SelectStatement('table', count=True, limit=10, order_by='d') ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) self.assertEqual(unicode(ss), 'SELECT COUNT(*) FROM table WHERE "a" = :0', unicode(ss)) + self.assertNotIn('LIMIT', unicode(ss)) + self.assertNotIn('ORDER', unicode(ss)) def test_context(self): ss = SelectStatement('table') From 
802cd08e09866bc78a725036398ccef84c50e55c Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 25 Oct 2013 17:28:12 -0700 Subject: [PATCH 0491/3726] making sure deletion works on the last item --- cqlengine/tests/columns/test_container_columns.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 9fbef0d805..b1a7268062 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -343,6 +343,18 @@ def test_empty_retrieve(self): tmp2 = TestMapModel.get(partition=tmp.partition) tmp2.int_map['blah'] = 1 + def test_remove_last_entry_works(self): + tmp = TestMapModel.create() + tmp.text_map["blah"] = datetime.now() + tmp.save() + del tmp.text_map["blah"] + tmp.save() + + tmp = TestMapModel.get(partition=tmp.partition) + self.assertNotIn("blah", tmp.int_map) + + + def test_io_success(self): """ Tests that a basic usage works as expected """ k1 = uuid4() From 3556c99724a59a899f0f33e686c0e2e552359c35 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 27 Oct 2013 16:22:31 -0700 Subject: [PATCH 0492/3726] refactoring count query --- cqlengine/query.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 80201a5ac1..c011059867 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -539,18 +539,12 @@ def count(self): """ Returns the number of rows matched by this query """ if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") + #TODO: check for previous query execution and return row count if it exists if self._result_cache is None: - qs = ['SELECT COUNT(*)'] - qs += ['FROM {}'.format(self.column_family_name)] - if self._where: - qs += ['WHERE {}'.format(self._where_clause())] - if self._allow_filtering: - qs += ['ALLOW FILTERING'] - - qs = ' '.join(qs) - - _, 
result = execute(qs, self._where_values()) + query = self._select_query() + query.count = True + _, result = execute(str(query), query.get_context()) return result[0][0] else: return len(self._result_cache) From cbbef483b20d9613216d2c08326e18cfcdebbd39 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 27 Oct 2013 16:22:47 -0700 Subject: [PATCH 0493/3726] adding quoting wrapper --- cqlengine/statements.py | 43 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 41 insertions(+), 2 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 55dfa27757..a7bc9ca882 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -1,9 +1,42 @@ -from cqlengine.operators import BaseWhereOperator, BaseAssignmentOperator +from cqlengine.operators import BaseWhereOperator, InOperator class StatementException(Exception): pass +class ValueQuoter(object): + + def __init__(self, value): + self.value = value + + def __unicode__(self): + from cql.query import cql_quote + if isinstance(self.value, bool): + return 'true' if self.value else 'false' + elif isinstance(self.value, (list, tuple)): + return '[' + ', '.join([cql_quote(v) for v in self.value]) + ']' + elif isinstance(self.value, dict): + return '{' + ', '.join([cql_quote(k) + ':' + cql_quote(v) for k,v in self.value.items()]) + '}' + elif isinstance(self.value, set): + return '{' + ', '.join([cql_quote(v) for v in self.value]) + '}' + return cql_quote(self.value) + + def __eq__(self, other): + if isinstance(other, self.__class__): + return self.value == other.value + return False + + def __str__(self): + return unicode(self).encode('utf-8') + + +class InQuoter(ValueQuoter): + + def __unicode__(self): + from cql.query import cql_quote + return '(' + ', '.join([cql_quote(v) for v in self.value]) + ')' + + class BaseClause(object): def __init__(self, field, value): @@ -28,7 +61,7 @@ def set_context_id(self, i): def update_context(self, ctx): """ updates the query context with this 
clauses values """ assert isinstance(ctx, dict) - ctx[str(self.context_id)] = self.value + ctx[str(self.context_id)] = ValueQuoter(self.value) class WhereClause(BaseClause): @@ -45,6 +78,12 @@ def __init__(self, field, operator, value): def __unicode__(self): return u'"{}" {} :{}'.format(self.field, self.operator, self.context_id) + def update_context(self, ctx): + if isinstance(self.operator, InOperator): + ctx[str(self.context_id)] = InQuoter(self.value) + else: + super(WhereClause, self).update_context(ctx) + class AssignmentClause(BaseClause): """ a single variable st statement """ From 88804adaf9155d5cc3e4c327cf66d87670c16b00 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 27 Oct 2013 16:32:59 -0700 Subject: [PATCH 0494/3726] adding where clause equality method --- cqlengine/query.py | 1 - cqlengine/statements.py | 13 +++++++++++++ cqlengine/tests/statements/test_quoter.py | 0 cqlengine/tests/statements/test_where_clause.py | 6 ++++++ 4 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 cqlengine/tests/statements/test_quoter.py diff --git a/cqlengine/query.py b/cqlengine/query.py index c011059867..464220a4a7 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -540,7 +540,6 @@ def count(self): if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") - #TODO: check for previous query execution and return row count if it exists if self._result_cache is None: query = self._select_query() query.count = True diff --git a/cqlengine/statements.py b/cqlengine/statements.py index a7bc9ca882..281701a971 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -50,6 +50,14 @@ def __unicode__(self): def __str__(self): return unicode(self).encode('utf-8') + def __eq__(self, other): + if isinstance(other, self.__class__): + return self.field == other.field and self.value == other.value + return False + + def __ne__(self, other): + return not self.__eq__(other) + def 
get_context_size(self): """ returns the number of entries this clause will add to the query context """ return 1 @@ -78,6 +86,11 @@ def __init__(self, field, operator, value): def __unicode__(self): return u'"{}" {} :{}'.format(self.field, self.operator, self.context_id) + def __eq__(self, other): + if super(WhereClause, self).__eq__(other): + return self.operator.__class__ == other.operator.__class__ + return False + def update_context(self, ctx): if isinstance(self.operator, InOperator): ctx[str(self.context_id)] = InQuoter(self.value) diff --git a/cqlengine/tests/statements/test_quoter.py b/cqlengine/tests/statements/test_quoter.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cqlengine/tests/statements/test_where_clause.py b/cqlengine/tests/statements/test_where_clause.py index 66defeadeb..938a2b4919 100644 --- a/cqlengine/tests/statements/test_where_clause.py +++ b/cqlengine/tests/statements/test_where_clause.py @@ -16,3 +16,9 @@ def test_where_clause_rendering(self): wc.set_context_id(5) self.assertEqual('"a" = :5', unicode(wc)) self.assertEqual('"a" = :5', str(wc)) + + def test_equality_method(self): + """ tests that 2 identical where clauses evaluate as == """ + wc1 = WhereClause('a', EqualsOperator(), 'c') + wc2 = WhereClause('a', EqualsOperator(), 'c') + assert wc1 == wc2 From 14008640fca758dabd07e95370a8166cb9121e95 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 27 Oct 2013 16:40:39 -0700 Subject: [PATCH 0495/3726] commenting out old operators --- cqlengine/query.py | 312 +++++++++++++++++++++++---------------------- 1 file changed, 157 insertions(+), 155 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 464220a4a7..cd5d7cec52 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -12,159 +12,161 @@ from cqlengine.exceptions import CQLEngineException, ValidationError from cqlengine.functions import QueryValue, Token -from cqlengine import statements, operators - #CQL 3 reference: 
#http://www.datastax.com/docs/1.1/references/cql/index +from cqlengine.operators import InOperator, EqualsOperator, GreaterThanOperator, GreaterThanOrEqualOperator +from cqlengine.operators import LessThanOperator, LessThanOrEqualOperator, BaseWhereOperator +from cqlengine.statements import WhereClause, SelectStatement + class QueryException(CQLEngineException): pass class DoesNotExist(QueryException): pass class MultipleObjectsReturned(QueryException): pass -class QueryOperatorException(QueryException): pass - - -class QueryOperator(object): - # The symbol that identifies this operator in filter kwargs - # ie: colname__ - symbol = None - - # The comparator symbol this operator uses in cql - cql_symbol = None - - QUERY_VALUE_WRAPPER = QueryValue - - def __init__(self, column, value): - self.column = column - self.value = value - - if isinstance(value, QueryValue): - self.query_value = value - else: - self.query_value = self.QUERY_VALUE_WRAPPER(value) - - #perform validation on this operator - self.validate_operator() - self.validate_value() - - @property - def cql(self): - """ - Returns this operator's portion of the WHERE clause - """ - return '{} {} {}'.format(self.column.cql, self.cql_symbol, self.query_value.cql) - - def validate_operator(self): - """ - Checks that this operator can be used on the column provided - """ - if self.symbol is None: - raise QueryOperatorException( - "{} is not a valid operator, use one with 'symbol' defined".format( - self.__class__.__name__ - ) - ) - if self.cql_symbol is None: - raise QueryOperatorException( - "{} is not a valid operator, use one with 'cql_symbol' defined".format( - self.__class__.__name__ - ) - ) - - def validate_value(self): - """ - Checks that the compare value works with this operator - - Doesn't do anything by default - """ - pass - - def get_dict(self): - """ - Returns this operators contribution to the cql.query arg dictionanry - - ie: if this column's name is colname, and the identifier is colval, - this 
should return the dict: {'colval':} - SELECT * FROM column_family WHERE colname=:colval - """ - return self.query_value.get_dict(self.column) - - @classmethod - def get_operator(cls, symbol): - if not hasattr(cls, 'opmap'): - QueryOperator.opmap = {} - def _recurse(klass): - if klass.symbol: - QueryOperator.opmap[klass.symbol.upper()] = klass - for subklass in klass.__subclasses__(): - _recurse(subklass) - pass - _recurse(QueryOperator) - try: - return QueryOperator.opmap[symbol.upper()] - except KeyError: - raise QueryOperatorException("{} doesn't map to a QueryOperator".format(symbol)) - - # equality operator, used by tests - - def __eq__(self, op): - return self.__class__ is op.__class__ and \ - self.column.db_field_name == op.column.db_field_name and \ - self.value == op.value - - def __ne__(self, op): - return not (self == op) - - def __hash__(self): - return hash(self.column.db_field_name) ^ hash(self.value) - - -class EqualsOperator(QueryOperator): - symbol = 'EQ' - cql_symbol = '=' - - -class IterableQueryValue(QueryValue): - def __init__(self, value): - try: - super(IterableQueryValue, self).__init__(value, [uuid4().hex for i in value]) - except TypeError: - raise QueryException("in operator arguments must be iterable, {} found".format(value)) - - def get_dict(self, column): - return dict((i, column.to_database(v)) for (i, v) in zip(self.identifier, self.value)) - - def get_cql(self): - return '({})'.format(', '.join(':{}'.format(i) for i in self.identifier)) - - -class InOperator(EqualsOperator): - symbol = 'IN' - cql_symbol = 'IN' - - QUERY_VALUE_WRAPPER = IterableQueryValue - - -class GreaterThanOperator(QueryOperator): - symbol = "GT" - cql_symbol = '>' - - -class GreaterThanOrEqualOperator(QueryOperator): - symbol = "GTE" - cql_symbol = '>=' - - -class LessThanOperator(QueryOperator): - symbol = "LT" - cql_symbol = '<' - - -class LessThanOrEqualOperator(QueryOperator): - symbol = "LTE" - cql_symbol = '<=' - +# class 
QueryOperatorException(QueryException): pass +# +# +# class QueryOperator(object): +# # The symbol that identifies this operator in filter kwargs +# # ie: colname__ +# symbol = None +# +# # The comparator symbol this operator uses in cql +# cql_symbol = None +# +# QUERY_VALUE_WRAPPER = QueryValue +# +# def __init__(self, column, value): +# self.column = column +# self.value = value +# +# if isinstance(value, QueryValue): +# self.query_value = value +# else: +# self.query_value = self.QUERY_VALUE_WRAPPER(value) +# +# #perform validation on this operator +# self.validate_operator() +# self.validate_value() +# +# @property +# def cql(self): +# """ +# Returns this operator's portion of the WHERE clause +# """ +# return '{} {} {}'.format(self.column.cql, self.cql_symbol, self.query_value.cql) +# +# def validate_operator(self): +# """ +# Checks that this operator can be used on the column provided +# """ +# if self.symbol is None: +# raise QueryOperatorException( +# "{} is not a valid operator, use one with 'symbol' defined".format( +# self.__class__.__name__ +# ) +# ) +# if self.cql_symbol is None: +# raise QueryOperatorException( +# "{} is not a valid operator, use one with 'cql_symbol' defined".format( +# self.__class__.__name__ +# ) +# ) +# +# def validate_value(self): +# """ +# Checks that the compare value works with this operator +# +# Doesn't do anything by default +# """ +# pass +# +# def get_dict(self): +# """ +# Returns this operators contribution to the cql.query arg dictionanry +# +# ie: if this column's name is colname, and the identifier is colval, +# this should return the dict: {'colval':} +# SELECT * FROM column_family WHERE colname=:colval +# """ +# return self.query_value.get_dict(self.column) +# +# @classmethod +# def get_operator(cls, symbol): +# if not hasattr(cls, 'opmap'): +# QueryOperator.opmap = {} +# def _recurse(klass): +# if klass.symbol: +# QueryOperator.opmap[klass.symbol.upper()] = klass +# for subklass in klass.__subclasses__(): +# 
_recurse(subklass) +# pass +# _recurse(QueryOperator) +# try: +# return QueryOperator.opmap[symbol.upper()] +# except KeyError: +# raise QueryOperatorException("{} doesn't map to a QueryOperator".format(symbol)) +# +# # equality operator, used by tests +# +# def __eq__(self, op): +# return self.__class__ is op.__class__ and \ +# self.column.db_field_name == op.column.db_field_name and \ +# self.value == op.value +# +# def __ne__(self, op): +# return not (self == op) +# +# def __hash__(self): +# return hash(self.column.db_field_name) ^ hash(self.value) +# +# +# class EqualsOperator(QueryOperator): +# symbol = 'EQ' +# cql_symbol = '=' +# +# +# class IterableQueryValue(QueryValue): +# def __init__(self, value): +# try: +# super(IterableQueryValue, self).__init__(value, [uuid4().hex for i in value]) +# except TypeError: +# raise QueryException("in operator arguments must be iterable, {} found".format(value)) +# +# def get_dict(self, column): +# return dict((i, column.to_database(v)) for (i, v) in zip(self.identifier, self.value)) +# +# def get_cql(self): +# return '({})'.format(', '.join(':{}'.format(i) for i in self.identifier)) +# +# +# class InOperator(EqualsOperator): +# symbol = 'IN' +# cql_symbol = 'IN' +# +# QUERY_VALUE_WRAPPER = IterableQueryValue +# +# +# class GreaterThanOperator(QueryOperator): +# symbol = "GT" +# cql_symbol = '>' +# +# +# class GreaterThanOrEqualOperator(QueryOperator): +# symbol = "GTE" +# cql_symbol = '>=' +# +# +# class LessThanOperator(QueryOperator): +# symbol = "LT" +# cql_symbol = '<' +# +# +# class LessThanOrEqualOperator(QueryOperator): +# symbol = "LTE" +# cql_symbol = '<=' +# class AbstractQueryableColumn(object): """ @@ -187,22 +189,22 @@ def in_(self, item): used in where you'd typically want to use python's `in` operator """ - return statements.WhereClause(unicode(self), operators.InOperator(), item) + return WhereClause(unicode(self), InOperator(), item) def __eq__(self, other): - return statements.WhereClause(unicode(self), 
operators.EqualsOperator(), other) + return WhereClause(unicode(self), EqualsOperator(), other) def __gt__(self, other): - return statements.WhereClause(unicode(self), operators.GreaterThanOperator(), other) + return WhereClause(unicode(self), GreaterThanOperator(), other) def __ge__(self, other): - return statements.WhereClause(unicode(self), operators.GreaterThanOrEqualOperator(), other) + return WhereClause(unicode(self), GreaterThanOrEqualOperator(), other) def __lt__(self, other): - return statements.WhereClause(unicode(self), operators.LessThanOperator(), other) + return WhereClause(unicode(self), LessThanOperator(), other) def __le__(self, other): - return statements.WhereClause(unicode(self), operators.LessThanOrEqualOperator(), other) + return WhereClause(unicode(self), LessThanOrEqualOperator(), other) class BatchType(object): @@ -342,7 +344,7 @@ def _select_query(self): """ Returns a select clause based on the given filter args """ - return statements.SelectStatement( + return SelectStatement( self.column_family_name, fields=self._select_fields(), where=self._where, @@ -467,7 +469,7 @@ def filter(self, *args, **kwargs): #add arguments to the where clause filters clone = copy.deepcopy(self) for operator in args: - if not isinstance(operator, statements.WhereClause): + if not isinstance(operator, WhereClause): raise QueryException('{} is not a valid query operator'.format(operator)) clone._where.append(operator) @@ -483,10 +485,10 @@ def filter(self, *args, **kwargs): raise QueryException("Can't resolve column name: '{}'".format(col_name)) #get query operator, or use equals if not supplied - operator_class = operators.BaseWhereOperator.get_operator(col_op or 'EQ') + operator_class = BaseWhereOperator.get_operator(col_op or 'EQ') operator = operator_class() - clone._where.append(statements.WhereClause(col_name, operator, val)) + clone._where.append(WhereClause(col_name, operator, val)) return clone From 0807b299dd6b00f2f5bcbc4413fb738f0d660337 Mon Sep 17 
00:00:00 2001 From: Blake Eggleston Date: Sun, 27 Oct 2013 17:14:27 -0700 Subject: [PATCH 0496/3726] adding equality and hash methods --- cqlengine/query.py | 29 +++++++++++++------------- cqlengine/statements.py | 6 ++++++ cqlengine/tests/query/test_queryset.py | 2 +- 3 files changed, 22 insertions(+), 15 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index cd5d7cec52..dee495c53f 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -340,10 +340,14 @@ def _select_fields(self): """ returns the fields to select """ return [] + def _validate_select(self): + """ put select query validation here """ + def _select_query(self): """ Returns a select clause based on the given filter args """ + self._validate_select() return SelectStatement( self.column_family_name, fields=self._select_fields(), @@ -627,7 +631,9 @@ def delete(self, columns=[]): execute(qs, self._where_values()) def __eq__(self, q): - return set(self._where) == set(q._where) + if len(self._where) == len(q._where): + return all([w in q._where for w in self._where]) + return False def __ne__(self, q): return not (self != q) @@ -667,30 +673,25 @@ class ModelQuerySet(AbstractQuerySet): """ """ - def _validate_where_syntax(self): + def _validate_select(self): """ Checks that a filterset will not create invalid cql """ - #check that there's either a = or IN relationship with a primary key or indexed field - equal_ops = [w for w in self._where if isinstance(w, EqualsOperator)] - token_ops = [w for w in self._where if isinstance(w.value, Token)] - if not any([w.column.primary_key or w.column.index for w in equal_ops]) and not token_ops: + equal_ops = [self.model._columns.get(w.field) for w in self._where if isinstance(w.operator, EqualsOperator)] + token_ops = [self.model._columns.get(w.field) for w in self._where if isinstance(w.operator, Token)] + if not any([w.primary_key or w.index for w in equal_ops]) and not token_ops: raise QueryException('Where clauses require either a "=" or "IN" 
comparison with either a primary key or indexed field') if not self._allow_filtering: #if the query is not on an indexed field - if not any([w.column.index for w in equal_ops]): - if not any([w.column.partition_key for w in equal_ops]) and not token_ops: + if not any([w.index for w in equal_ops]): + if not any([w.partition_key for w in equal_ops]) and not token_ops: raise QueryException('Filtering on a clustering key without a partition key is not allowed unless allow_filtering() is called on the querset') - if any(not w.column.partition_key for w in token_ops): + if any(not w.partition_key for w in token_ops): raise QueryException('The token() function is only supported on the partition key') - def _where_clause(self): - """ Returns a where clause based on the given filter args """ - self._validate_where_syntax() - return super(ModelQuerySet, self)._where_clause() - def _get_select_statement(self): """ Returns the fields to be returned by the select query """ + self._validate_select() if self._defer_fields or self._only_fields: fields = self.model._columns.keys() if self._defer_fields: diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 281701a971..80ec6a5aef 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -50,6 +50,9 @@ def __unicode__(self): def __str__(self): return unicode(self).encode('utf-8') + def __hash__(self): + return hash(self.field) ^ hash(self.value) + def __eq__(self, other): if isinstance(other, self.__class__): return self.field == other.field and self.value == other.value @@ -86,6 +89,9 @@ def __init__(self, field, operator, value): def __unicode__(self): return u'"{}" {} :{}'.format(self.field, self.operator, self.context_id) + def __hash__(self): + return super(WhereClause, self).__hash__() ^ hash(self.operator) + def __eq__(self, other): if super(WhereClause, self).__eq__(other): return self.operator.__class__ == other.operator.__class__ diff --git a/cqlengine/tests/query/test_queryset.py 
b/cqlengine/tests/query/test_queryset.py index 8017c1578a..97e6033551 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -491,12 +491,12 @@ def test_conn_is_returned_after_filling_cache(self): assert q._cur is None - class TimeUUIDQueryModel(Model): partition = columns.UUID(primary_key=True) time = columns.TimeUUID(primary_key=True) data = columns.Text(required=False) + class TestMinMaxTimeUUIDFunctions(BaseCassEngTestCase): @classmethod From 898fc438ea058de721192c115628c0eed7d95a98 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 27 Oct 2013 17:18:10 -0700 Subject: [PATCH 0497/3726] removing unused methods --- cqlengine/query.py | 27 +++++---------------------- 1 file changed, 5 insertions(+), 22 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index dee495c53f..46efaad460 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -340,14 +340,15 @@ def _select_fields(self): """ returns the fields to select """ return [] - def _validate_select(self): + def _validate_select_where(self): """ put select query validation here """ def _select_query(self): """ Returns a select clause based on the given filter args """ - self._validate_select() + if self._where: + self._validate_select_where() return SelectStatement( self.column_family_name, fields=self._select_fields(), @@ -656,10 +657,6 @@ class SimpleQuerySet(AbstractQuerySet): """ - def _get_select_statement(self): - """ Returns the fields to be returned by the select query """ - return 'SELECT *' - def _get_result_constructor(self, names): """ Returns a function that will be used to instantiate query results @@ -673,8 +670,8 @@ class ModelQuerySet(AbstractQuerySet): """ """ - def _validate_select(self): - """ Checks that a filterset will not create invalid cql """ + def _validate_select_where(self): + """ Checks that a filterset will not create invalid select statement """ #check that there's either a = or IN relationship with a primary key 
or indexed field equal_ops = [self.model._columns.get(w.field) for w in self._where if isinstance(w.operator, EqualsOperator)] token_ops = [self.model._columns.get(w.field) for w in self._where if isinstance(w.operator, Token)] @@ -689,20 +686,6 @@ def _validate_select(self): if any(not w.partition_key for w in token_ops): raise QueryException('The token() function is only supported on the partition key') - def _get_select_statement(self): - """ Returns the fields to be returned by the select query """ - self._validate_select() - if self._defer_fields or self._only_fields: - fields = self.model._columns.keys() - if self._defer_fields: - fields = [f for f in fields if f not in self._defer_fields] - elif self._only_fields: - fields = self._only_fields - db_fields = [self.model._columns[f].db_field_name for f in fields] - return 'SELECT {}'.format(', '.join(['"{}"'.format(f) for f in db_fields])) - else: - return 'SELECT *' - def _select_fields(self): if self._defer_fields or self._only_fields: fields = self.model._columns.keys() From 736ec6dc7f33d510173c39c4be024fe7fab615ae Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 27 Oct 2013 19:55:32 -0700 Subject: [PATCH 0498/3726] fixing delete syntax --- cqlengine/statements.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 80ec6a5aef..9b5c62790c 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -301,7 +301,8 @@ def __init__(self, table, fields=None, consistency=None, where=None): def __unicode__(self): qs = ['DELETE'] - qs += [', '.join(['"{}"'.format(f) for f in self.fields]) if self.fields else '*'] + if self.fields: + qs += [', '.join(['"{}"'.format(f) for f in self.fields])] qs += ['FROM', self.table] if self.where_clauses: From e0164167c6dcd8425395575ac5594c45ca91fcf2 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 27 Oct 2013 19:55:49 -0700 Subject: [PATCH 0499/3726] replacing delete call with 
statement --- cqlengine/query.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 46efaad460..9db0aebb30 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -16,7 +16,7 @@ #http://www.datastax.com/docs/1.1/references/cql/index from cqlengine.operators import InOperator, EqualsOperator, GreaterThanOperator, GreaterThanOrEqualOperator from cqlengine.operators import LessThanOperator, LessThanOrEqualOperator, BaseWhereOperator -from cqlengine.statements import WhereClause, SelectStatement +from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement class QueryException(CQLEngineException): pass @@ -613,23 +613,24 @@ def defer(self, fields): def create(self, **kwargs): return self.model(**kwargs).batch(self._batch).ttl(self._ttl).consistency(self._consistency).save() - #----delete--- - def delete(self, columns=[]): + def delete(self): """ Deletes the contents of a query """ #validate where clause partition_key = self.model._primary_keys.values()[0] - if not any([c.column.db_field_name == partition_key.db_field_name for c in self._where]): + if not any([c.field == partition_key.column_name for c in self._where]): raise QueryException("The partition key must be defined on delete queries") - qs = ['DELETE FROM {}'.format(self.column_family_name)] - qs += ['WHERE {}'.format(self._where_clause())] - qs = ' '.join(qs) + + dq = DeleteStatement( + self.column_family_name, + where=self._where + ) if self._batch: - self._batch.add_query(qs, self._where_values()) + self._batch.add_query(str(dq), dq.get_context()) else: - execute(qs, self._where_values()) + execute(str(dq), dq.get_context()) def __eq__(self, q): if len(self._where) == len(q._where): From 187c5b4678ce4f3c080bcf3fb296a2fb495c1977 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 28 Oct 2013 08:19:24 -0700 Subject: [PATCH 0500/3726] adding set update clause and supporting unit tests --- 
cqlengine/query.py | 12 +- cqlengine/statements.py | 63 ++++++++++ .../tests/columns/test_container_columns.py | 2 +- .../statements/test_assignment_clause.py | 13 -- .../statements/test_assignment_clauses.py | 115 ++++++++++++++++++ 5 files changed, 188 insertions(+), 17 deletions(-) delete mode 100644 cqlengine/tests/statements/test_assignment_clause.py create mode 100644 cqlengine/tests/statements/test_assignment_clauses.py diff --git a/cqlengine/query.py b/cqlengine/query.py index 9db0aebb30..c5939f592d 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -16,7 +16,7 @@ #http://www.datastax.com/docs/1.1/references/cql/index from cqlengine.operators import InOperator, EqualsOperator, GreaterThanOperator, GreaterThanOrEqualOperator from cqlengine.operators import LessThanOperator, LessThanOrEqualOperator, BaseWhereOperator -from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement +from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement, UpdateStatement class QueryException(CQLEngineException): pass @@ -795,7 +795,10 @@ def update(self, **values): ttl_stmt ) ctx.update(self._where_values()) - execute(qs, ctx, self._consistency) + if self._batch: + self._batch.add_query(qs, ctx) + else: + execute(qs, ctx, self._consistency) if nulled_columns: qs = "DELETE {} FROM {} WHERE {}".format( @@ -803,7 +806,10 @@ def update(self, **values): self.column_family_name, self._where_clause() ) - execute(qs, self._where_values(), self._consistency) + if self._batch: + self._batch.add_query(qs, self._where_values()) + else: + execute(qs, self._where_values(), self._consistency) class DMLQuery(object): diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 9b5c62790c..4812b56510 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -114,6 +114,69 @@ def insert_tuple(self): return self.field, self.context_id +class SetUpdateClause(AssignmentClause): + """ updates a set collection """ + + def 
__init__(self, field, value, previous=None): + super(SetUpdateClause, self).__init__(field, value) + self.previous = previous + self._assignments = None + self._additions = None + self._removals = None + self._analyzed = False + + def __unicode__(self): + qs = [] + ctx_id = self.context_id + if self._assignments: + qs += ['"{}" = :{}'.format(self.field, ctx_id)] + ctx_id += 1 + if self._additions: + qs += ['"{0}" = "{0}" + :{1}'.format(self.field, ctx_id)] + ctx_id += 1 + if self._removals: + qs += ['"{0}" = "{0}" - :{1}'.format(self.field, ctx_id)] + + return ', '.join(qs) + + def _analyze(self): + """ works out the updates to be performed """ + if self.value is None or self.value == self.previous: + pass + elif self.previous is None or not any({v in self.previous for v in self.value}): + self._assignments = self.value + else: + # partial update time + self._additions = (self.value - self.previous) or None + self._removals = (self.previous - self.value) or None + self._analyzed = True + + def get_context_size(self): + if not self._analyzed: self._analyze() + return int(bool(self._assignments)) + int(bool(self._additions)) + int(bool(self._removals)) + + def update_context(self, ctx): + if not self._analyzed: self._analyze() + ctx_id = self.context_id + if self._assignments: + ctx[str(ctx_id)] = self._assignments + ctx_id += 1 + if self._additions: + ctx[str(ctx_id)] = self._additions + ctx_id += 1 + if self._removals: + ctx[str(ctx_id)] = self._removals + + + +class ListUpdateClause(AssignmentClause): + """ updates a list collection """ + + +class MapUpdateClause(AssignmentClause): + """ updates a map collection """ + + class BaseCQLStatement(object): """ The base cql statement class """ diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 0d3dc524b2..3d83c3be8e 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -105,7 +105,7 
@@ def test_partial_update_creation(self): assert len([s for s in statements if '"TEST" = "TEST" +' in s]) == 1 def test_update_from_none(self): - """ Tests that updating an 'None' list creates a straight insert statement """ + """ Tests that updating a 'None' list creates a straight insert statement """ ctx = {} col = columns.Set(columns.Integer, db_field="TEST") statements = col.get_update_statement({1, 2, 3, 4}, None, ctx) diff --git a/cqlengine/tests/statements/test_assignment_clause.py b/cqlengine/tests/statements/test_assignment_clause.py deleted file mode 100644 index b778d4b4a8..0000000000 --- a/cqlengine/tests/statements/test_assignment_clause.py +++ /dev/null @@ -1,13 +0,0 @@ -from unittest import TestCase -from cqlengine.statements import AssignmentClause - - -class AssignmentClauseTests(TestCase): - - def test_rendering(self): - pass - - def test_insert_tuple(self): - ac = AssignmentClause('a', 'b') - ac.set_context_id(10) - self.assertEqual(ac.insert_tuple(), ('a', 10)) diff --git a/cqlengine/tests/statements/test_assignment_clauses.py b/cqlengine/tests/statements/test_assignment_clauses.py new file mode 100644 index 0000000000..8f807b0431 --- /dev/null +++ b/cqlengine/tests/statements/test_assignment_clauses.py @@ -0,0 +1,115 @@ +from unittest import TestCase +from cqlengine.statements import AssignmentClause, SetUpdateClause + + +class AssignmentClauseTests(TestCase): + + def test_rendering(self): + pass + + def test_insert_tuple(self): + ac = AssignmentClause('a', 'b') + ac.set_context_id(10) + self.assertEqual(ac.insert_tuple(), ('a', 10)) + + +class SetUpdateClauseTests(TestCase): + + def test_update_from_none(self): + c = SetUpdateClause('s', {1, 2}, None) + c._analyze() + c.set_context_id(0) + + self.assertEqual(c._assignments, {1, 2}) + self.assertIsNone(c._additions) + self.assertIsNone(c._removals) + + self.assertEqual(c.get_context_size(), 1) + self.assertEqual(str(c), '"s" = :0') + + ctx = {} + c.update_context(ctx) + self.assertEqual(ctx, 
{'0': {1, 2}}) + + def test_null_update(self): + """ tests setting a set to None creates an empty update statement """ + c = SetUpdateClause('s', None, {1, 2}) + c._analyze() + c.set_context_id(0) + + self.assertIsNone(c._assignments) + self.assertIsNone(c._additions) + self.assertIsNone(c._removals) + + self.assertEqual(c.get_context_size(), 0) + self.assertEqual(str(c), '') + + ctx = {} + c.update_context(ctx) + self.assertEqual(ctx, {}) + + def test_no_update(self): + """ tests an unchanged value creates an empty update statement """ + c = SetUpdateClause('s', {1, 2}, {1, 2}) + c._analyze() + c.set_context_id(0) + + self.assertIsNone(c._assignments) + self.assertIsNone(c._additions) + self.assertIsNone(c._removals) + + self.assertEqual(c.get_context_size(), 0) + self.assertEqual(str(c), '') + + ctx = {} + c.update_context(ctx) + self.assertEqual(ctx, {}) + + def test_additions(self): + c = SetUpdateClause('s', {1, 2, 3}, {1, 2}) + c._analyze() + c.set_context_id(0) + + self.assertIsNone(c._assignments) + self.assertEqual(c._additions, {3}) + self.assertIsNone(c._removals) + + self.assertEqual(c.get_context_size(), 1) + self.assertEqual(str(c), '"s" = "s" + :0') + + ctx = {} + c.update_context(ctx) + self.assertEqual(ctx, {'0': {3}}) + + def test_removals(self): + c = SetUpdateClause('s', {1, 2}, {1, 2, 3}) + c._analyze() + c.set_context_id(0) + + self.assertIsNone(c._assignments) + self.assertIsNone(c._additions) + self.assertEqual(c._removals, {3}) + + self.assertEqual(c.get_context_size(), 1) + self.assertEqual(str(c), '"s" = "s" - :0') + + ctx = {} + c.update_context(ctx) + self.assertEqual(ctx, {'0': {3}}) + + def test_additions_and_removals(self): + c = SetUpdateClause('s', {2, 3}, {1, 2}) + c._analyze() + c.set_context_id(0) + + self.assertIsNone(c._assignments) + self.assertEqual(c._additions, {3}) + self.assertEqual(c._removals, {1}) + + self.assertEqual(c.get_context_size(), 2) + self.assertEqual(str(c), '"s" = "s" + :0, "s" = "s" - :1') + + ctx = {} + 
c.update_context(ctx) + self.assertEqual(ctx, {'0': {3}, '1': {1}}) + From 022d31021bff735e8b93f1fa82a63ac49f027bd4 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 28 Oct 2013 14:28:53 -0700 Subject: [PATCH 0501/3726] validation around deleting containers --- cqlengine/tests/columns/test_container_columns.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index b1a7268062..5af677fb0c 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -411,9 +411,15 @@ def test_updates_from_none(self): m.int_map = expected m.save() + m2 = TestMapModel.get(partition=m.partition) assert m2.int_map == expected + m2.int_map = None + m2.save() + m3 = TestMapModel.get(partition=m.partition) + assert m3.int_map != expected + def test_updates_to_none(self): """ Tests that setting the field to None works as expected """ m = TestMapModel.create(int_map={1: uuid4()}) From 20afb040701cf03ae41d11306a54df31afdde433 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 28 Oct 2013 18:29:55 -0700 Subject: [PATCH 0502/3726] adding list update clause --- cqlengine/statements.py | 108 +++++++++++++++- .../statements/test_assignment_clauses.py | 121 +++++++++++++++++- 2 files changed, 221 insertions(+), 8 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 4812b56510..43a4c1017a 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -114,16 +114,31 @@ def insert_tuple(self): return self.field, self.context_id -class SetUpdateClause(AssignmentClause): - """ updates a set collection """ +class ContainerUpdateClause(AssignmentClause): def __init__(self, field, value, previous=None): - super(SetUpdateClause, self).__init__(field, value) + super(ContainerUpdateClause, self).__init__(field, value) self.previous = previous self._assignments = None + self._analyzed = False + + 
def _analyze(self): + raise NotImplementedError + + def get_context_size(self): + raise NotImplementedError + + def update_context(self, ctx): + raise NotImplementedError + + +class SetUpdateClause(ContainerUpdateClause): + """ updates a set collection """ + + def __init__(self, field, value, previous=None): + super(SetUpdateClause, self).__init__(field, value, previous) self._additions = None self._removals = None - self._analyzed = False def __unicode__(self): qs = [] @@ -168,12 +183,91 @@ def update_context(self, ctx): ctx[str(ctx_id)] = self._removals - -class ListUpdateClause(AssignmentClause): +class ListUpdateClause(ContainerUpdateClause): """ updates a list collection """ + def __init__(self, field, value, previous=None): + super(ListUpdateClause, self).__init__(field, value, previous) + self._append = None + self._prepend = None + + def __unicode__(self): + qs = [] + ctx_id = self.context_id + if self._assignments: + qs += ['"{}" = :{}'.format(self.field, ctx_id)] + ctx_id += 1 + + if self._prepend: + qs += ['"{0}" = :{1} + "{0}"'.format(self.field, ctx_id)] + ctx_id += 1 + + if self._append: + qs += ['"{0}" = "{0}" + :{1}'.format(self.field, ctx_id)] + + return ', '.join(qs) + + def get_context_size(self): + if not self._analyzed: self._analyze() + return int(bool(self._assignments)) + int(bool(self._append)) + int(bool(self._prepend)) + + def update_context(self, ctx): + if not self._analyzed: self._analyze() + ctx_id = self.context_id + if self._assignments: + ctx[str(ctx_id)] = self._assignments + ctx_id += 1 + if self._prepend: + # CQL seems to prepend element at a time, starting + # with the element at idx 0, we can either reverse + # it here, or have it inserted in reverse + ctx[str(ctx_id)] = list(reversed(self._prepend)) + ctx_id += 1 + if self._append: + ctx[str(ctx_id)] = self._append + + def _analyze(self): + """ works out the updates to be performed """ + if self.value is None or self.value == self.previous: + pass + + elif self.previous is 
None: + self._assignments = self.value + + elif len(self.value) < len(self.previous): + # if elements have been removed, + # rewrite the whole list + self._assignments = self.value + + elif len(self.previous) == 0: + # if we're updating from an empty + # list, do a complete insert + self._assignments = self.value + else: + + # the max start idx we want to compare + search_space = len(self.value) - max(0, len(self.previous)-1) + + # the size of the sub lists we want to look at + search_size = len(self.previous) + + for i in range(search_space): + #slice boundary + j = i + search_size + sub = self.value[i:j] + idx_cmp = lambda idx: self.previous[idx] == sub[idx] + if idx_cmp(0) and idx_cmp(-1) and self.previous == sub: + self._prepend = self.value[:i] or None + self._append = self.value[j:] or None + break + + # if both append and prepend are still None after looking + # at both lists, an insert statement will be created + if self._prepend is self._append is None: + self._assignments = self.value + -class MapUpdateClause(AssignmentClause): +class MapUpdateClause(ContainerUpdateClause): """ updates a map collection """ diff --git a/cqlengine/tests/statements/test_assignment_clauses.py b/cqlengine/tests/statements/test_assignment_clauses.py index 8f807b0431..bb38fb97f8 100644 --- a/cqlengine/tests/statements/test_assignment_clauses.py +++ b/cqlengine/tests/statements/test_assignment_clauses.py @@ -1,5 +1,5 @@ from unittest import TestCase -from cqlengine.statements import AssignmentClause, SetUpdateClause +from cqlengine.statements import AssignmentClause, SetUpdateClause, ListUpdateClause class AssignmentClauseTests(TestCase): @@ -113,3 +113,122 @@ def test_additions_and_removals(self): c.update_context(ctx) self.assertEqual(ctx, {'0': {3}, '1': {1}}) + +class ListUpdateClauseTests(TestCase): + + def test_update_from_none(self): + c = ListUpdateClause('s', [1, 2, 3]) + c._analyze() + c.set_context_id(0) + + self.assertEqual(c._assignments, [1, 2, 3]) + 
self.assertIsNone(c._append) + self.assertIsNone(c._prepend) + + self.assertEqual(c.get_context_size(), 1) + self.assertEqual(str(c), '"s" = :0') + + ctx = {} + c.update_context(ctx) + self.assertEqual(ctx, {'0': [1, 2, 3]}) + + def test_update_from_empty(self): + c = ListUpdateClause('s', [1, 2, 3], previous=[]) + c._analyze() + c.set_context_id(0) + + self.assertEqual(c._assignments, [1, 2, 3]) + self.assertIsNone(c._append) + self.assertIsNone(c._prepend) + + self.assertEqual(c.get_context_size(), 1) + self.assertEqual(str(c), '"s" = :0') + + ctx = {} + c.update_context(ctx) + self.assertEqual(ctx, {'0': [1, 2, 3]}) + + def test_update_from_different_list(self): + c = ListUpdateClause('s', [1, 2, 3], previous=[3, 2, 1]) + c._analyze() + c.set_context_id(0) + + self.assertEqual(c._assignments, [1, 2, 3]) + self.assertIsNone(c._append) + self.assertIsNone(c._prepend) + + self.assertEqual(c.get_context_size(), 1) + self.assertEqual(str(c), '"s" = :0') + + ctx = {} + c.update_context(ctx) + self.assertEqual(ctx, {'0': [1, 2, 3]}) + + def test_append(self): + c = ListUpdateClause('s', [1, 2, 3, 4], previous=[1, 2]) + c._analyze() + c.set_context_id(0) + + self.assertIsNone(c._assignments) + self.assertEqual(c._append, [3, 4]) + self.assertIsNone(c._prepend) + + self.assertEqual(c.get_context_size(), 1) + self.assertEqual(str(c), '"s" = "s" + :0') + + ctx = {} + c.update_context(ctx) + self.assertEqual(ctx, {'0': [3, 4]}) + + def test_prepend(self): + c = ListUpdateClause('s', [1, 2, 3, 4], previous=[3, 4]) + c._analyze() + c.set_context_id(0) + + self.assertIsNone(c._assignments) + self.assertIsNone(c._append) + self.assertEqual(c._prepend, [1, 2]) + + self.assertEqual(c.get_context_size(), 1) + self.assertEqual(str(c), '"s" = :0 + "s"') + + ctx = {} + c.update_context(ctx) + # test context list reversal + self.assertEqual(ctx, {'0': [2, 1]}) + + def test_append_and_prepend(self): + c = ListUpdateClause('s', [1, 2, 3, 4, 5, 6], previous=[3, 4]) + c._analyze() + 
c.set_context_id(0) + + self.assertIsNone(c._assignments) + self.assertEqual(c._append, [5, 6]) + self.assertEqual(c._prepend, [1, 2]) + + self.assertEqual(c.get_context_size(), 2) + self.assertEqual(str(c), '"s" = :0 + "s", "s" = "s" + :1') + + ctx = {} + c.update_context(ctx) + # test context list reversal + self.assertEqual(ctx, {'0': [2, 1], '1': [5, 6]}) + + def test_shrinking_list_update(self): + """ tests that updating to a smaller list results in an insert statement """ + c = ListUpdateClause('s', [1, 2, 3], previous=[1, 2, 3, 4]) + c._analyze() + c.set_context_id(0) + + self.assertEqual(c._assignments, [1, 2, 3]) + self.assertIsNone(c._append) + self.assertIsNone(c._prepend) + + self.assertEqual(c.get_context_size(), 1) + self.assertEqual(str(c), '"s" = :0') + + ctx = {} + c.update_context(ctx) + self.assertEqual(ctx, {'0': [1, 2, 3]}) + + From 06632cf53215b7f7d076052f1356e62ae85dbbe4 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 28 Oct 2013 18:56:32 -0700 Subject: [PATCH 0503/3726] adding map update clause, delete clause, and field delete clause --- cqlengine/statements.py | 44 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 43a4c1017a..b3b1c6d5b3 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -266,10 +266,44 @@ def _analyze(self): if self._prepend is self._append is None: self._assignments = self.value + self._analyzed = True + class MapUpdateClause(ContainerUpdateClause): """ updates a map collection """ + def __init__(self, field, value, previous=None): + super(MapUpdateClause, self).__init__(field, value, previous) + + +class BaseDeleteClause(BaseClause): + pass + + +class FieldDeleteClause(BaseDeleteClause): + """ deletes a field from a row """ + + def __init__(self, field): + super(FieldDeleteClause, self).__init__(field, None) + + def __unicode__(self): + return self.field + + def update_context(self, 
ctx): + pass + + def get_context_size(self): + return 0 + + +class MapDeleteClause(BaseDeleteClause): + """ removes keys from a map """ + + def __init__(self, field, value, previous=None): + super(MapDeleteClause, self).__init__(field, value) + self.previous = previous + self._analysed = False + class BaseCQLStatement(object): """ The base cql statement class """ @@ -454,7 +488,15 @@ def __init__(self, table, fields=None, consistency=None, where=None): consistency=consistency, where=where, ) - self.fields = [fields] if isinstance(fields, basestring) else (fields or []) + for field in fields or []: + self.add_field(field) + + def add_field(self, field): + if isinstance(field, basestring): + field = FieldDeleteClause(field) + if not isinstance(field, BaseClause): + raise StatementException("only instances of AssignmentClause can be added to statements") + self.fields.append(field) def __unicode__(self): qs = ['DELETE'] From cae7a1945a184e6a78add19e9d180b6fbee590f0 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 28 Oct 2013 19:01:44 -0700 Subject: [PATCH 0504/3726] adding not implemented errors and stubbed tests for map and delete field clauses --- cqlengine/statements.py | 37 ++++++++++++++++++- .../statements/test_assignment_clauses.py | 10 +++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index b3b1c6d5b3..a32bd6cbb9 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -192,6 +192,7 @@ def __init__(self, field, value, previous=None): self._prepend = None def __unicode__(self): + if not self._analyzed: self._analyze() qs = [] ctx_id = self.context_id if self._assignments: @@ -275,6 +276,21 @@ class MapUpdateClause(ContainerUpdateClause): def __init__(self, field, value, previous=None): super(MapUpdateClause, self).__init__(field, value, previous) + def get_context_size(self): + if not self._analyzed: + self._analyze() + raise NotImplementedError('implement this') + + def 
update_context(self, ctx): + if not self._analyzed: + self._analyze() + raise NotImplementedError('implement this') + + def __unicode__(self): + if not self._analyzed: + self._analyze() + raise NotImplementedError('implement this') + class BaseDeleteClause(BaseClause): pass @@ -302,7 +318,26 @@ class MapDeleteClause(BaseDeleteClause): def __init__(self, field, value, previous=None): super(MapDeleteClause, self).__init__(field, value) self.previous = previous - self._analysed = False + self._analyzed = False + self._removals = None + + def _analyze(self): + self._analyzed = True + + def update_context(self, ctx): + if not self._analyzed: + self._analyze() + raise NotImplementedError('implement this') + + def get_context_size(self): + if not self._analyzed: + self._analyze() + raise NotImplementedError('implement this') + + def __unicode__(self): + if not self._analyzed: + self._analyze() + raise NotImplementedError('implement this') class BaseCQLStatement(object): diff --git a/cqlengine/tests/statements/test_assignment_clauses.py b/cqlengine/tests/statements/test_assignment_clauses.py index bb38fb97f8..3da24decde 100644 --- a/cqlengine/tests/statements/test_assignment_clauses.py +++ b/cqlengine/tests/statements/test_assignment_clauses.py @@ -232,3 +232,13 @@ def test_shrinking_list_update(self): self.assertEqual(ctx, {'0': [1, 2, 3]}) +class MapUpdateTests(TestCase): + pass + + +class MapDeleteTests(TestCase): + pass + + +class FieldDeleteTests(TestCase): + pass From 871ba29f3bb8e2a9781ec73cd9d3f980127e593d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 30 Oct 2013 20:07:10 -0700 Subject: [PATCH 0505/3726] implementing map update clause and adding stubbed out tests --- cqlengine/statements.py | 31 +++++++++++++------ .../statements/test_assignment_clauses.py | 14 +++++++-- 2 files changed, 34 insertions(+), 11 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index a32bd6cbb9..05ea9def94 100644 --- a/cqlengine/statements.py +++ 
b/cqlengine/statements.py @@ -275,21 +275,34 @@ class MapUpdateClause(ContainerUpdateClause): def __init__(self, field, value, previous=None): super(MapUpdateClause, self).__init__(field, value, previous) + self._updates = None + + def _analyze(self): + self._updates = sorted([k for k, v in self.value.items() if v != self.previous.get(k)]) or None + self._analyzed = True def get_context_size(self): - if not self._analyzed: - self._analyze() - raise NotImplementedError('implement this') + if not self._analyzed: self._analyze() + return len(self._updates or []) * 2 def update_context(self, ctx): - if not self._analyzed: - self._analyze() - raise NotImplementedError('implement this') + if not self._analyzed: self._analyze() + ctx_id = self.context_id + for key in self._updates or []: + ctx[str(ctx_id)] = key + ctx[str(ctx_id + 1)] = self.value.get(key) + ctx_id += 2 def __unicode__(self): - if not self._analyzed: - self._analyze() - raise NotImplementedError('implement this') + if not self._analyzed: self._analyze() + qs = [] + + ctx_id = self.context_id + for _ in self._updates or []: + qs += ['"{}"[:{}] = :{}'.format(self.field, ctx_id, ctx_id + 1)] + ctx_id += 2 + + return ', '.join(qs) class BaseDeleteClause(BaseClause): diff --git a/cqlengine/tests/statements/test_assignment_clauses.py b/cqlengine/tests/statements/test_assignment_clauses.py index 3da24decde..bbe044a5bf 100644 --- a/cqlengine/tests/statements/test_assignment_clauses.py +++ b/cqlengine/tests/statements/test_assignment_clauses.py @@ -233,11 +233,21 @@ def test_shrinking_list_update(self): class MapUpdateTests(TestCase): - pass + + def test_update(self): + pass + + def test_update_from_null(self): + pass + + def test_nulled_columns_arent_included(self): + pass class MapDeleteTests(TestCase): - pass + + def test_update(self): + pass class FieldDeleteTests(TestCase): From cdf45bf1abe889c85199e0c120741689e18d3d7e Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 30 Oct 2013 20:08:22 -0700 
Subject: [PATCH 0506/3726] removing extra line --- docs/topics/models.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 9d214d3a7f..c55a81d687 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -147,7 +147,6 @@ Model Methods -- method:: update(**values) - Performs an update on the model instance. You can pass in values to set on the model for updating, or you can call without values to execute an update against any modified fields. If no fields on the model have been modified since loading, no query will be From 979166f161f46c4c027b1651e92db33dde66b02c Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 30 Oct 2013 20:12:12 -0700 Subject: [PATCH 0507/3726] fixing update method syntax --- docs/topics/models.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index c55a81d687..1fae6070d4 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -145,14 +145,14 @@ Model Methods Sets the ttl values to run instance updates and inserts queries with. - -- method:: update(**values) + .. method:: update(**values) Performs an update on the model instance. You can pass in values to set on the model for updating, or you can call without values to execute an update against any modified fields. If no fields on the model have been modified since loading, no query will be performed. Model validation is performed normally. - -- method:: get_changed_columns() + .. 
method:: get_changed_columns() Returns a list of column names that have changed since the model was instantiated or saved From ff70a5595b2d17d0bf2d5cd7344e37825d2353b8 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 31 Oct 2013 21:23:05 -0700 Subject: [PATCH 0508/3726] implementing map delete and field delete --- cqlengine/statements.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 05ea9def94..f5dc82064b 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -276,6 +276,7 @@ class MapUpdateClause(ContainerUpdateClause): def __init__(self, field, value, previous=None): super(MapUpdateClause, self).__init__(field, value, previous) self._updates = None + self.previous = self.previous or {} def _analyze(self): self._updates = sorted([k for k, v in self.value.items() if v != self.previous.get(k)]) or None @@ -316,7 +317,7 @@ def __init__(self, field): super(FieldDeleteClause, self).__init__(field, None) def __unicode__(self): - return self.field + return '"{}"'.format(self.field) def update_context(self, ctx): pass @@ -330,27 +331,27 @@ class MapDeleteClause(BaseDeleteClause): def __init__(self, field, value, previous=None): super(MapDeleteClause, self).__init__(field, value) - self.previous = previous + self.value = self.value or {} + self.previous = previous or {} self._analyzed = False self._removals = None def _analyze(self): + self._removals = sorted([k for k in self.previous if k not in self.value]) self._analyzed = True def update_context(self, ctx): - if not self._analyzed: - self._analyze() - raise NotImplementedError('implement this') + if not self._analyzed: self._analyze() + for idx, key in enumerate(self._removals): + ctx[str(self.context_id + idx)] = key def get_context_size(self): - if not self._analyzed: - self._analyze() - raise NotImplementedError('implement this') + if not self._analyzed: self._analyze() + return 
len(self._removals) def __unicode__(self): - if not self._analyzed: - self._analyze() - raise NotImplementedError('implement this') + if not self._analyzed: self._analyze() + return ', '.join(['"{}"[:{}]'.format(self.field, self.context_id + i) for i in range(len(self._removals))]) class BaseCQLStatement(object): From 6b75dc357b242ff6f0fcfeebd1213540b6d4f794 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 31 Oct 2013 21:23:17 -0700 Subject: [PATCH 0509/3726] adding tests around map update and delete, and field delete --- .../statements/test_assignment_clauses.py | 49 ++++++++++++++++--- 1 file changed, 43 insertions(+), 6 deletions(-) diff --git a/cqlengine/tests/statements/test_assignment_clauses.py b/cqlengine/tests/statements/test_assignment_clauses.py index bbe044a5bf..03d2a5ab0f 100644 --- a/cqlengine/tests/statements/test_assignment_clauses.py +++ b/cqlengine/tests/statements/test_assignment_clauses.py @@ -1,5 +1,5 @@ from unittest import TestCase -from cqlengine.statements import AssignmentClause, SetUpdateClause, ListUpdateClause +from cqlengine.statements import AssignmentClause, SetUpdateClause, ListUpdateClause, MapUpdateClause, MapDeleteClause, FieldDeleteClause class AssignmentClauseTests(TestCase): @@ -235,20 +235,57 @@ def test_shrinking_list_update(self): class MapUpdateTests(TestCase): def test_update(self): - pass + c = MapUpdateClause('s', {3: 0, 5: 6}, {5: 0, 3: 4}) + c._analyze() + c.set_context_id(0) + + self.assertEqual(c._updates, [3, 5]) + self.assertEqual(c.get_context_size(), 4) + self.assertEqual(str(c), '"s"[:0] = :1, "s"[:2] = :3') + + ctx = {} + c.update_context(ctx) + self.assertEqual(ctx, {'0': 3, "1": 0, '2': 5, '3': 6}) def test_update_from_null(self): - pass + c = MapUpdateClause('s', {3: 0, 5: 6}) + c._analyze() + c.set_context_id(0) + + self.assertEqual(c._updates, [3, 5]) + self.assertEqual(c.get_context_size(), 4) + self.assertEqual(str(c), '"s"[:0] = :1, "s"[:2] = :3') + + ctx = {} + c.update_context(ctx) + 
self.assertEqual(ctx, {'0': 3, "1": 0, '2': 5, '3': 6}) def test_nulled_columns_arent_included(self): - pass + c = MapUpdateClause('s', {3: 0}, {1: 2, 3: 4}) + c._analyze() + c.set_context_id(0) + + self.assertNotIn(1, c._updates) class MapDeleteTests(TestCase): def test_update(self): - pass + c = MapDeleteClause('s', {3: 0}, {1: 2, 3: 4, 5: 6}) + c._analyze() + c.set_context_id(0) + + self.assertEqual(c._removals, [1, 5]) + self.assertEqual(c.get_context_size(), 2) + self.assertEqual(str(c), '"s"[:0], "s"[:1]') + + ctx = {} + c.update_context(ctx) + self.assertEqual(ctx, {'0': 1, '1': 5}) class FieldDeleteTests(TestCase): - pass + + def test_str(self): + f = FieldDeleteClause("blake") + assert str(f) == '"blake"' From 6d873e604d99079899f9b017e8226b2bb167a1b8 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sat, 2 Nov 2013 10:02:38 -0700 Subject: [PATCH 0510/3726] fixing delete statement --- cqlengine/statements.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index f5dc82064b..1db9af0773 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -537,6 +537,7 @@ def __init__(self, table, fields=None, consistency=None, where=None): consistency=consistency, where=where, ) + self.fields = [] for field in fields or []: self.add_field(field) From 48870d59b1578a3811cf035af1eec7f6a105c67f Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 07:48:46 -0800 Subject: [PATCH 0511/3726] updating named query tests --- cqlengine/tests/query/test_named.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cqlengine/tests/query/test_named.py b/cqlengine/tests/query/test_named.py index cba46dcb59..8f9781da96 100644 --- a/cqlengine/tests/query/test_named.py +++ b/cqlengine/tests/query/test_named.py @@ -1,4 +1,4 @@ -from cqlengine import query +from cqlengine import operators from cqlengine.named import NamedKeyspace from cqlengine.query import ResultObject from 
cqlengine.tests.query.test_queryset import BaseQuerySetUsage @@ -21,14 +21,14 @@ def test_query_filter_parsing(self): assert len(query1._where) == 1 op = query1._where[0] - assert isinstance(op, query.EqualsOperator) + assert isinstance(op, operators.EqualsOperator) assert op.value == 5 query2 = query1.filter(expected_result__gte=1) assert len(query2._where) == 2 op = query2._where[1] - assert isinstance(op, query.GreaterThanOrEqualOperator) + assert isinstance(op, operators.GreaterThanOrEqualOperator) assert op.value == 1 def test_query_expression_parsing(self): @@ -37,14 +37,14 @@ def test_query_expression_parsing(self): assert len(query1._where) == 1 op = query1._where[0] - assert isinstance(op, query.EqualsOperator) + assert isinstance(op.operator, operators.EqualsOperator) assert op.value == 5 query2 = query1.filter(self.table.column('expected_result') >= 1) assert len(query2._where) == 2 op = query2._where[1] - assert isinstance(op, query.GreaterThanOrEqualOperator) + assert isinstance(op.operator, operators.GreaterThanOrEqualOperator) assert op.value == 1 def test_filter_method_where_clause_generation(self): From 5b2351488f84e9065069d3f17215966c7986d2ac Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 07:49:11 -0800 Subject: [PATCH 0512/3726] using to_database for filter values --- cqlengine/query.py | 2 +- cqlengine/tests/query/test_datetime_queries.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index c5939f592d..7de3898da6 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -493,7 +493,7 @@ def filter(self, *args, **kwargs): operator_class = BaseWhereOperator.get_operator(col_op or 'EQ') operator = operator_class() - clone._where.append(WhereClause(col_name, operator, val)) + clone._where.append(WhereClause(col_name, operator, column.to_database(val))) return clone diff --git a/cqlengine/tests/query/test_datetime_queries.py 
b/cqlengine/tests/query/test_datetime_queries.py index e374bd7d18..39cc0e332f 100644 --- a/cqlengine/tests/query/test_datetime_queries.py +++ b/cqlengine/tests/query/test_datetime_queries.py @@ -43,7 +43,7 @@ def test_range_query(self): end = start + timedelta(days=3) results = DateTimeQueryTestModel.filter(user=0, day__gte=start, day__lt=end) - assert len(results) == 3 + assert len(results) == 3 def test_datetime_precision(self): """ Tests that millisecond resolution is preserved when saving datetime objects """ From fb21199f51c42b81a017740dfaabb1d0f92e97cf Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 08:00:25 -0800 Subject: [PATCH 0513/3726] fixing statement tests --- cqlengine/statements.py | 8 ++++++-- cqlengine/tests/statements/test_base_clause.py | 2 +- cqlengine/tests/statements/test_delete_statement.py | 5 +++-- cqlengine/tests/statements/test_select_statement.py | 2 +- cqlengine/tests/statements/test_update_statement.py | 2 +- 5 files changed, 12 insertions(+), 7 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 1db9af0773..697bd6ceb5 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -72,7 +72,7 @@ def set_context_id(self, i): def update_context(self, ctx): """ updates the query context with this clauses values """ assert isinstance(ctx, dict) - ctx[str(self.context_id)] = ValueQuoter(self.value) + ctx[str(self.context_id)] = self.value class WhereClause(BaseClause): @@ -538,6 +538,8 @@ def __init__(self, table, fields=None, consistency=None, where=None): where=where, ) self.fields = [] + if isinstance(fields, basestring): + fields = [fields] for field in fields or []: self.add_field(field) @@ -551,7 +553,9 @@ def add_field(self, field): def __unicode__(self): qs = ['DELETE'] if self.fields: - qs += [', '.join(['"{}"'.format(f) for f in self.fields])] + qs += [', '.join(['{}'.format(f) for f in self.fields])] + else: + qs += ['*'] qs += ['FROM', self.table] if self.where_clauses: 
diff --git a/cqlengine/tests/statements/test_base_clause.py b/cqlengine/tests/statements/test_base_clause.py index 04d7d52845..c5bbeb402d 100644 --- a/cqlengine/tests/statements/test_base_clause.py +++ b/cqlengine/tests/statements/test_base_clause.py @@ -11,6 +11,6 @@ def test_context_updating(self): ctx = {} ss.set_context_id(10) ss.update_context(ctx) - assert ctx == {10: 'b'} + assert ctx == {'10': 'b'} diff --git a/cqlengine/tests/statements/test_delete_statement.py b/cqlengine/tests/statements/test_delete_statement.py index 58d05c867e..be4087779a 100644 --- a/cqlengine/tests/statements/test_delete_statement.py +++ b/cqlengine/tests/statements/test_delete_statement.py @@ -8,7 +8,8 @@ class DeleteStatementTests(TestCase): def test_single_field_is_listified(self): """ tests that passing a string field into the constructor puts it into a list """ ds = DeleteStatement('table', 'field') - self.assertEqual(ds.fields, ['field']) + self.assertEqual(len(ds.fields), 1) + self.assertEqual(ds.fields[0].field, 'field') def test_field_rendering(self): """ tests that fields are properly added to the select statement """ @@ -35,4 +36,4 @@ def test_where_clause_rendering(self): def test_context(self): ds = DeleteStatement('table', None) ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) - self.assertEqual(ds.get_context(), {0: 'b'}) + self.assertEqual(ds.get_context(), {'0': 'b'}) diff --git a/cqlengine/tests/statements/test_select_statement.py b/cqlengine/tests/statements/test_select_statement.py index 495258c2fd..d59c8eb77c 100644 --- a/cqlengine/tests/statements/test_select_statement.py +++ b/cqlengine/tests/statements/test_select_statement.py @@ -42,7 +42,7 @@ def test_count(self): def test_context(self): ss = SelectStatement('table') ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) - self.assertEqual(ss.get_context(), {0: 'b'}) + self.assertEqual(ss.get_context(), {'0': 'b'}) def test_additional_rendering(self): ss = SelectStatement( diff --git 
a/cqlengine/tests/statements/test_update_statement.py b/cqlengine/tests/statements/test_update_statement.py index 6c633852d1..0945d2341e 100644 --- a/cqlengine/tests/statements/test_update_statement.py +++ b/cqlengine/tests/statements/test_update_statement.py @@ -23,7 +23,7 @@ def test_context(self): us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) - self.assertEqual(us.get_context(), {0: 'b', 1: 'd', 2: 'x'}) + self.assertEqual(us.get_context(), {'0': 'b', '1': 'd', '2': 'x'}) def test_additional_rendering(self): us = UpdateStatement('table', ttl=60) From a0e99934020ae840bb07859dd227118e4eb156af Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 08:14:30 -0800 Subject: [PATCH 0514/3726] fixing delete statement --- cqlengine/statements.py | 2 -- cqlengine/tests/statements/test_delete_statement.py | 10 +++++----- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 697bd6ceb5..f974a18951 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -554,8 +554,6 @@ def __unicode__(self): qs = ['DELETE'] if self.fields: qs += [', '.join(['{}'.format(f) for f in self.fields])] - else: - qs += ['*'] qs += ['FROM', self.table] if self.where_clauses: diff --git a/cqlengine/tests/statements/test_delete_statement.py b/cqlengine/tests/statements/test_delete_statement.py index be4087779a..b1055ea4fb 100644 --- a/cqlengine/tests/statements/test_delete_statement.py +++ b/cqlengine/tests/statements/test_delete_statement.py @@ -20,18 +20,18 @@ def test_field_rendering(self): def test_none_fields_rendering(self): """ tests that a '*' is added if no fields are passed in """ ds = DeleteStatement('table', None) - self.assertTrue(unicode(ds).startswith('DELETE *'), unicode(ds)) - self.assertTrue(str(ds).startswith('DELETE *'), str(ds)) + 
self.assertTrue(unicode(ds).startswith('DELETE FROM'), unicode(ds)) + self.assertTrue(str(ds).startswith('DELETE FROM'), str(ds)) def test_table_rendering(self): ds = DeleteStatement('table', None) - self.assertTrue(unicode(ds).startswith('DELETE * FROM table'), unicode(ds)) - self.assertTrue(str(ds).startswith('DELETE * FROM table'), str(ds)) + self.assertTrue(unicode(ds).startswith('DELETE FROM table'), unicode(ds)) + self.assertTrue(str(ds).startswith('DELETE FROM table'), str(ds)) def test_where_clause_rendering(self): ds = DeleteStatement('table', None) ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) - self.assertEqual(unicode(ds), 'DELETE * FROM table WHERE "a" = :0', unicode(ds)) + self.assertEqual(unicode(ds), 'DELETE FROM table WHERE "a" = :0', unicode(ds)) def test_context(self): ds = DeleteStatement('table', None) From 2daf6390e6a8ec7b5d99d20861f8db3e98b8216b Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 08:19:30 -0800 Subject: [PATCH 0515/3726] fixing IN statement to_database --- cqlengine/query.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 7de3898da6..3cdfd57e77 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -493,7 +493,14 @@ def filter(self, *args, **kwargs): operator_class = BaseWhereOperator.get_operator(col_op or 'EQ') operator = operator_class() - clone._where.append(WhereClause(col_name, operator, column.to_database(val))) + if isinstance(operator, InOperator): + if not isinstance(val, (list, tuple)): + raise QueryException('IN queries must use a list/tuple value') + query_val = [column.to_database(v) for v in val] + else: + query_val = column.to_database(val) + + clone._where.append(WhereClause(col_name, operator, query_val)) return clone From 7d98bfb67c82d5101fb4e5eba683ea4fdeb3fda6 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 08:42:51 -0800 Subject: [PATCH 0516/3726] refactoring where test --- 
cqlengine/tests/query/test_named.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/cqlengine/tests/query/test_named.py b/cqlengine/tests/query/test_named.py index 8f9781da96..e63261f79c 100644 --- a/cqlengine/tests/query/test_named.py +++ b/cqlengine/tests/query/test_named.py @@ -1,5 +1,6 @@ from cqlengine import operators from cqlengine.named import NamedKeyspace +from cqlengine.operators import EqualsOperator, GreaterThanOrEqualOperator from cqlengine.query import ResultObject from cqlengine.tests.query.test_queryset import BaseQuerySetUsage from cqlengine.tests.base import BaseCassEngTestCase @@ -52,14 +53,24 @@ def test_filter_method_where_clause_generation(self): Tests the where clause creation """ query1 = self.table.objects(test_id=5) - ids = [o.query_value.identifier for o in query1._where] - where = query1._where_clause() - assert where == '"test_id" = :{}'.format(*ids) + self.assertEqual(len(query1._where), 1) + where = query1._where[0] + self.assertEqual(where.field, 'test_id') + self.assertEqual(where.value, 5) query2 = query1.filter(expected_result__gte=1) - ids = [o.query_value.identifier for o in query2._where] - where = query2._where_clause() - assert where == '"test_id" = :{} AND "expected_result" >= :{}'.format(*ids) + where = query2._where + self.assertEqual(len(where), 2) + + where = query2._where[0] + self.assertEqual(where.field, 'test_id') + self.assertIsInstance(where.operator, EqualsOperator) + self.assertEqual(where.value, 5) + + where = query2._where[1] + self.assertEqual(where.field, 'expected_result') + self.assertIsInstance(where.operator, GreaterThanOrEqualOperator) + self.assertEqual(where.value, 1) def test_query_expression_where_clause_generation(self): """ From 1dd5b50bd7ce432a9dd39a8bae99fc154c98a077 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 08:43:05 -0800 Subject: [PATCH 0517/3726] refactoring update query --- cqlengine/query.py | 42 
++++++++++----------------- cqlengine/tests/query/test_updates.py | 3 ++ 2 files changed, 18 insertions(+), 27 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 3cdfd57e77..545d5810ae 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -16,7 +16,7 @@ #http://www.datastax.com/docs/1.1/references/cql/index from cqlengine.operators import InOperator, EqualsOperator, GreaterThanOperator, GreaterThanOrEqualOperator from cqlengine.operators import LessThanOperator, LessThanOrEqualOperator, BaseWhereOperator -from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement, UpdateStatement +from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause class QueryException(CQLEngineException): pass @@ -767,9 +767,8 @@ def update(self, **values): if not values: return - set_statements = [] - ctx = {} nulled_columns = set() + us = UpdateStatement(self.column_family_name, where=self._where, ttl=self._ttl) for name, val in values.items(): col = self.model._columns.get(name) # check for nonexistant columns @@ -784,39 +783,28 @@ def update(self, **values): nulled_columns.add(name) continue # add the update statements - if isinstance(col, (BaseContainerColumn, Counter)): - val_mgr = self.instance._values[name] - set_statements += col.get_update_statement(val, val_mgr.previous_value, ctx) - + if isinstance(col, Counter): + # TODO: implement counter updates + raise NotImplementedError else: - field_id = uuid4().hex - set_statements += ['"{}" = :{}'.format(col.db_field_name, field_id)] - ctx[field_id] = val - - if set_statements: - ttl_stmt = "USING TTL {}".format(self._ttl) if self._ttl else "" - qs = "UPDATE {} SET {} WHERE {} {}".format( - self.column_family_name, - ', '.join(set_statements), - self._where_clause(), - ttl_stmt - ) - ctx.update(self._where_values()) + us.add_assignment_clause(AssignmentClause(name, col.to_database(val))) + + if us.assignments: + qs = str(us) + ctx = 
us.get_context() if self._batch: self._batch.add_query(qs, ctx) else: execute(qs, ctx, self._consistency) if nulled_columns: - qs = "DELETE {} FROM {} WHERE {}".format( - ', '.join(nulled_columns), - self.column_family_name, - self._where_clause() - ) + ds = DeleteStatement(self.column_family_name, fields=nulled_columns, where=self._where) + qs = str(ds) + ctx = ds.get_context() if self._batch: - self._batch.add_query(qs, self._where_values()) + self._batch.add_query(qs, ctx) else: - execute(qs, self._where_values(), self._consistency) + execute(qs, ctx, self._consistency) class DMLQuery(object): diff --git a/cqlengine/tests/query/test_updates.py b/cqlengine/tests/query/test_updates.py index 0c9c88cf21..b54b294c60 100644 --- a/cqlengine/tests/query/test_updates.py +++ b/cqlengine/tests/query/test_updates.py @@ -112,3 +112,6 @@ def test_mixed_value_and_null_update(self): assert row.cluster == i assert row.count == (6 if i == 3 else i) assert row.text == (None if i == 3 else str(i)) + + def test_counter_updates(self): + pass From 3a5a09fe5ad8d5b6d47ca7b24abd2eaeaaa001d9 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 08:46:12 -0800 Subject: [PATCH 0518/3726] fixing more where construction tests --- cqlengine/tests/query/test_named.py | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/cqlengine/tests/query/test_named.py b/cqlengine/tests/query/test_named.py index e63261f79c..38df15420a 100644 --- a/cqlengine/tests/query/test_named.py +++ b/cqlengine/tests/query/test_named.py @@ -22,14 +22,14 @@ def test_query_filter_parsing(self): assert len(query1._where) == 1 op = query1._where[0] - assert isinstance(op, operators.EqualsOperator) + assert isinstance(op.operator, operators.EqualsOperator) assert op.value == 5 query2 = query1.filter(expected_result__gte=1) assert len(query2._where) == 2 op = query2._where[1] - assert isinstance(op, operators.GreaterThanOrEqualOperator) + assert isinstance(op.operator, 
operators.GreaterThanOrEqualOperator) assert op.value == 1 def test_query_expression_parsing(self): @@ -59,8 +59,7 @@ def test_filter_method_where_clause_generation(self): self.assertEqual(where.value, 5) query2 = query1.filter(expected_result__gte=1) - where = query2._where - self.assertEqual(len(where), 2) + self.assertEqual(len(query2._where), 2) where = query2._where[0] self.assertEqual(where.field, 'test_id') @@ -77,14 +76,23 @@ def test_query_expression_where_clause_generation(self): Tests the where clause creation """ query1 = self.table.objects(self.table.column('test_id') == 5) - ids = [o.query_value.identifier for o in query1._where] - where = query1._where_clause() - assert where == '"test_id" = :{}'.format(*ids) + self.assertEqual(len(query1._where), 1) + where = query1._where[0] + self.assertEqual(where.field, 'test_id') + self.assertEqual(where.value, 5) query2 = query1.filter(self.table.column('expected_result') >= 1) - ids = [o.query_value.identifier for o in query2._where] - where = query2._where_clause() - assert where == '"test_id" = :{} AND "expected_result" >= :{}'.format(*ids) + self.assertEqual(len(query2._where), 2) + + where = query2._where[0] + self.assertEqual(where.field, 'test_id') + self.assertIsInstance(where.operator, EqualsOperator) + self.assertEqual(where.value, 5) + + where = query2._where[1] + self.assertEqual(where.field, 'expected_result') + self.assertIsInstance(where.operator, GreaterThanOrEqualOperator) + self.assertEqual(where.value, 1) class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage): @@ -99,7 +107,6 @@ def setUpClass(cls): cls.keyspace = NamedKeyspace(ks) cls.table = cls.keyspace.table(tn) - def test_count(self): """ Tests that adding filtering statements affects the count query as expected """ assert self.table.objects.count() == 12 From f426e0138e1eb25304c01e051ec5d01295e9c4ee Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 09:53:51 -0800 Subject: [PATCH 0519/3726] hacking in the 
query functions, also getting all tests to pass --- cqlengine/columns.py | 9 ++- cqlengine/functions.py | 63 ++++++++++---------- cqlengine/named.py | 4 ++ cqlengine/query.py | 8 ++- cqlengine/statements.py | 25 +++++++- cqlengine/tests/query/test_queryoperators.py | 34 +++++++---- 6 files changed, 93 insertions(+), 50 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 79b90e25b9..05ace2bb7f 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -865,8 +865,15 @@ def __init__(self, model): self.partition_columns = model._partition_keys.values() super(_PartitionKeysToken, self).__init__(partition_key=True) + @property + def db_field_name(self): + return 'token({})'.format(', '.join(['"{}"'.format(c.db_field_name) for c in self.partition_columns])) + def to_database(self, value): - raise NotImplementedError + from cqlengine.functions import Token + assert isinstance(value, Token) + value.set_columns(self.partition_columns) + return value def get_cql(self): return "token({})".format(", ".join(c.cql for c in self.partition_columns)) diff --git a/cqlengine/functions.py b/cqlengine/functions.py index 136618453c..ceb6a78be9 100644 --- a/cqlengine/functions.py +++ b/cqlengine/functions.py @@ -9,24 +9,24 @@ class QueryValue(object): be passed into .filter() keyword args """ - _cql_string = ':{}' + format_string = ':{}' - def __init__(self, value, identifier=None): + def __init__(self, value): self.value = value - self.identifier = uuid1().hex if identifier is None else identifier + self.context_id = None + + def __unicode__(self): + return self.format_string.format(self.context_id) - def get_cql(self): - return self._cql_string.format(self.identifier) + def set_context_id(self, ctx_id): + self.context_id = ctx_id - def get_value(self): - return self.value + def get_context_size(self): + return 1 - def get_dict(self, column): - return {self.identifier: column.to_database(self.get_value())} + def update_context(self, ctx): + 
ctx[str(self.context_id)] = self.value - @property - def cql(self): - return self.get_cql() class BaseQueryFunction(QueryValue): """ @@ -42,7 +42,7 @@ class MinTimeUUID(BaseQueryFunction): http://cassandra.apache.org/doc/cql3/CQL.html#timeuuidFun """ - _cql_string = 'MinTimeUUID(:{})' + format_string = 'MinTimeUUID(:{})' def __init__(self, value): """ @@ -53,14 +53,11 @@ def __init__(self, value): raise ValidationError('datetime instance is required') super(MinTimeUUID, self).__init__(value) - def get_value(self): + def update_context(self, ctx): epoch = datetime(1970, 1, 1, tzinfo=self.value.tzinfo) offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 + ctx[str(self.context_id)] = long(((self.value - epoch).total_seconds() - offset) * 1000) - return long(((self.value - epoch).total_seconds() - offset) * 1000) - - def get_dict(self, column): - return {self.identifier: self.get_value()} class MaxTimeUUID(BaseQueryFunction): """ @@ -69,7 +66,7 @@ class MaxTimeUUID(BaseQueryFunction): http://cassandra.apache.org/doc/cql3/CQL.html#timeuuidFun """ - _cql_string = 'MaxTimeUUID(:{})' + format_string = 'MaxTimeUUID(:{})' def __init__(self, value): """ @@ -80,14 +77,11 @@ def __init__(self, value): raise ValidationError('datetime instance is required') super(MaxTimeUUID, self).__init__(value) - def get_value(self): + def update_context(self, ctx): epoch = datetime(1970, 1, 1, tzinfo=self.value.tzinfo) offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 + ctx[str(self.context_id)] = long(((self.value - epoch).total_seconds() - offset) * 1000) - return long(((self.value - epoch).total_seconds() - offset) * 1000) - - def get_dict(self, column): - return {self.identifier: self.get_value()} class Token(BaseQueryFunction): """ @@ -99,15 +93,20 @@ class Token(BaseQueryFunction): def __init__(self, *values): if len(values) == 1 and isinstance(values[0], (list, tuple)): values = values[0] - super(Token, self).__init__(values, 
[uuid1().hex for i in values]) + super(Token, self).__init__(values) + self._columns = None + + def set_columns(self, columns): + self._columns = columns - def get_dict(self, column): - items = zip(self.identifier, self.value, column.partition_columns) - return dict( - (id, col.to_database(val)) for id, val, col in items - ) + def get_context_size(self): + return len(self.value) - def get_cql(self): - token_args = ', '.join(':{}'.format(id) for id in self.identifier) + def __unicode__(self): + token_args = ', '.join(':{}'.format(self.context_id + i) for i in range(self.get_context_size())) return "token({})".format(token_args) + def update_context(self, ctx): + for i, (col, val) in enumerate(zip(self._columns, self.value)): + ctx[str(self.context_id + i)] = col.to_database(val) + diff --git a/cqlengine/named.py b/cqlengine/named.py index 2b75443144..c6ba3ac995 100644 --- a/cqlengine/named.py +++ b/cqlengine/named.py @@ -40,6 +40,10 @@ def _get_column(self): """ :rtype: NamedColumn """ return self + @property + def db_field_name(self): + return self.name + @property def cql(self): return self.get_cql() diff --git a/cqlengine/query.py b/cqlengine/query.py index 545d5810ae..0418883fef 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -10,7 +10,7 @@ from cqlengine.connection import connection_manager, execute, RowResult from cqlengine.exceptions import CQLEngineException, ValidationError -from cqlengine.functions import QueryValue, Token +from cqlengine.functions import QueryValue, Token, BaseQueryFunction #CQL 3 reference: #http://www.datastax.com/docs/1.1/references/cql/index @@ -480,12 +480,14 @@ def filter(self, *args, **kwargs): for arg, val in kwargs.items(): col_name, col_op = self._parse_filter_arg(arg) + quote_field = True #resolve column and operator try: column = self.model._get_column(col_name) except KeyError: if col_name == 'pk__token': column = columns._PartitionKeysToken(self.model) + quote_field = False else: raise QueryException("Can't 
resolve column name: '{}'".format(col_name)) @@ -497,10 +499,12 @@ def filter(self, *args, **kwargs): if not isinstance(val, (list, tuple)): raise QueryException('IN queries must use a list/tuple value') query_val = [column.to_database(v) for v in val] + elif isinstance(val, BaseQueryFunction): + query_val = val else: query_val = column.to_database(val) - clone._where.append(WhereClause(col_name, operator, query_val)) + clone._where.append(WhereClause(column.db_field_name, operator, query_val, quote_field=quote_field)) return clone diff --git a/cqlengine/statements.py b/cqlengine/statements.py index f974a18951..834e647924 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -1,3 +1,4 @@ +from cqlengine.functions import QueryValue from cqlengine.operators import BaseWhereOperator, InOperator @@ -78,16 +79,27 @@ def update_context(self, ctx): class WhereClause(BaseClause): """ a single where statement used in queries """ - def __init__(self, field, operator, value): + def __init__(self, field, operator, value, quote_field=True): + """ + + :param field: + :param operator: + :param value: + :param quote_field: hack to get the token function rendering properly + :return: + """ if not isinstance(operator, BaseWhereOperator): raise StatementException( "operator must be of type {}, got {}".format(BaseWhereOperator, type(operator)) ) super(WhereClause, self).__init__(field, value) self.operator = operator + self.query_value = self.value if isinstance(self.value, QueryValue) else QueryValue(self.value) + self.quote_field = quote_field def __unicode__(self): - return u'"{}" {} :{}'.format(self.field, self.operator, self.context_id) + field = ('"{}"' if self.quote_field else '{}').format(self.field) + return u'{} {} {}'.format(field, self.operator, unicode(self.query_value)) def __hash__(self): return super(WhereClause, self).__hash__() ^ hash(self.operator) @@ -97,11 +109,18 @@ def __eq__(self, other): return self.operator.__class__ == 
other.operator.__class__ return False + def get_context_size(self): + return self.query_value.get_context_size() + + def set_context_id(self, i): + super(WhereClause, self).set_context_id(i) + self.query_value.set_context_id(i) + def update_context(self, ctx): if isinstance(self.operator, InOperator): ctx[str(self.context_id)] = InQuoter(self.value) else: - super(WhereClause, self).update_context(ctx) + self.query_value.update_context(ctx) class AssignmentClause(BaseClause): diff --git a/cqlengine/tests/query/test_queryoperators.py b/cqlengine/tests/query/test_queryoperators.py index 8f5243fbab..11dec4dded 100644 --- a/cqlengine/tests/query/test_queryoperators.py +++ b/cqlengine/tests/query/test_queryoperators.py @@ -1,10 +1,12 @@ from datetime import datetime -import time +from cqlengine.columns import DateTime from cqlengine.tests.base import BaseCassEngTestCase from cqlengine import columns, Model from cqlengine import functions from cqlengine import query +from cqlengine.statements import WhereClause +from cqlengine.operators import EqualsOperator class TestQuerySetOperation(BaseCassEngTestCase): @@ -13,22 +15,26 @@ def test_maxtimeuuid_function(self): Tests that queries with helper functions are generated properly """ now = datetime.now() - col = columns.DateTime() - col.set_column_name('time') - qry = query.EqualsOperator(col, functions.MaxTimeUUID(now)) + where = WhereClause('time', EqualsOperator(), functions.MaxTimeUUID(now)) + where.set_context_id(5) - assert qry.cql == '"time" = MaxTimeUUID(:{})'.format(qry.value.identifier) + self.assertEqual(str(where), '"time" = MaxTimeUUID(:5)') + ctx = {} + where.update_context(ctx) + self.assertEqual(ctx, {'5': DateTime().to_database(now)}) def test_mintimeuuid_function(self): """ Tests that queries with helper functions are generated properly """ now = datetime.now() - col = columns.DateTime() - col.set_column_name('time') - qry = query.EqualsOperator(col, functions.MinTimeUUID(now)) + where = WhereClause('time', 
EqualsOperator(), functions.MinTimeUUID(now)) + where.set_context_id(5) - assert qry.cql == '"time" = MinTimeUUID(:{})'.format(qry.value.identifier) + self.assertEqual(str(where), '"time" = MinTimeUUID(:5)') + ctx = {} + where.update_context(ctx) + self.assertEqual(ctx, {'5': DateTime().to_database(now)}) def test_token_function(self): @@ -39,12 +45,16 @@ class TestModel(Model): func = functions.Token('a', 'b') q = TestModel.objects.filter(pk__token__gt=func) - self.assertEquals(q._where[0].cql, 'token("p1", "p2") > token(:{}, :{})'.format(*func.identifier)) + where = q._where[0] + where.set_context_id(1) + self.assertEquals(str(where), 'token("p1", "p2") > token(:{}, :{})'.format(1, 2)) - # Token(tuple()) is also possible for convinience + # Token(tuple()) is also possible for convenience # it (allows for Token(obj.pk) syntax) func = functions.Token(('a', 'b')) q = TestModel.objects.filter(pk__token__gt=func) - self.assertEquals(q._where[0].cql, 'token("p1", "p2") > token(:{}, :{})'.format(*func.identifier)) + where = q._where[0] + where.set_context_id(1) + self.assertEquals(str(where), 'token("p1", "p2") > token(:{}, :{})'.format(1, 2)) From b9fcb20ef37ad37518d436db26204d28bba37966 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 12:00:54 -0800 Subject: [PATCH 0520/3726] removing ttl statement method --- cqlengine/query.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 0418883fef..90805820d9 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -738,11 +738,6 @@ def _get_ordering_condition(self, colname): return column.db_field_name, order_type - def _get_ttl_statement(self): - if not self._ttl: - return "" - return "USING TTL {}".format(self._ttl) - def values_list(self, *fields, **kwargs): """ Instructs the query set to return tuples, not model instance """ flat = kwargs.pop('flat', False) From dcdc555422d88e538a078b6f4e7a74993a102bac Mon Sep 17 00:00:00 2001 From: Blake Eggleston 
Date: Sun, 3 Nov 2013 12:19:00 -0800 Subject: [PATCH 0521/3726] moving delete query to delete statement --- cqlengine/query.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 90805820d9..c5e36618a3 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -1003,21 +1003,20 @@ def delete(self): """ Deletes one instance """ if self.instance is None: raise CQLEngineException("DML Query intance attribute is None") - field_values = {} - qs = ['DELETE FROM {}'.format(self.column_family_name)] - qs += ['WHERE'] - where_statements = [] - for name, col in self.model._primary_keys.items(): - field_id = uuid4().hex - field_values[field_id] = col.to_database(getattr(self.instance, name)) - where_statements += ['"{}" = :{}'.format(col.db_field_name, field_id)] - - qs += [' AND '.join(where_statements)] - qs = ' '.join(qs) + ds = DeleteStatement(self.column_family_name) + for name, col in self.model._primary_keys.items(): + ds.add_where_clause(WhereClause( + col.db_field_name, + EqualsOperator(), + col.to_database(getattr(self.instance, name)) + )) + + qs = str(ds) + ctx = ds.get_context() if self._batch: - self._batch.add_query(qs, field_values) + self._batch.add_query(qs, ctx) else: - execute(qs, field_values, self._consistency) + execute(qs, ctx, self._consistency) From 25e787a82cb18882993ae2812f1f7b40051eb7bc Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 13:34:35 -0800 Subject: [PATCH 0522/3726] moving save insert to insert statement --- cqlengine/query.py | 38 ++++++++++++++++++++------------------ cqlengine/statements.py | 7 +++++++ 2 files changed, 27 insertions(+), 18 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index c5e36618a3..35e58459b4 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -16,7 +16,7 @@ #http://www.datastax.com/docs/1.1/references/cql/index from cqlengine.operators import InOperator, EqualsOperator, 
GreaterThanOperator, GreaterThanOrEqualOperator from cqlengine.operators import LessThanOperator, LessThanOrEqualOperator, BaseWhereOperator -from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause +from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause, InsertStatement class QueryException(CQLEngineException): pass @@ -938,6 +938,7 @@ def update(self): self._delete_null_columns() + # TODO: delete def _get_query_values(self): """ returns all the data needed to do queries @@ -970,31 +971,32 @@ def save(self): raise CQLEngineException("DML Query intance attribute is None") assert type(self.instance) == self.model - values, field_names, field_ids, field_values, query_values = self._get_query_values() - - qs = [] + nulled_fields = set() if self.instance._has_counter or self.instance._can_update(): return self.update() else: - qs += ["INSERT INTO {}".format(self.column_family_name)] - qs += ["({})".format(', '.join(['"{}"'.format(f) for f in field_names]))] - qs += ['VALUES'] - qs += ["({})".format(', '.join([':'+field_ids[f] for f in field_names]))] - - if self._ttl: - qs += ["USING TTL {}".format(self._ttl)] - - qs += [] - qs = ' '.join(qs) - + insert = InsertStatement(self.column_family_name, ttl=self._ttl) + for name, col in self.instance._columns.items(): + val = getattr(self.instance, name, None) + if col._val_is_null(val): + if self.instance._values[name].changed: + nulled_fields.add(col.db_field_name) + continue + insert.add_assignment_clause(AssignmentClause( + col.db_field_name, + col.to_database(getattr(self.instance, name, None)) + )) # skip query execution if it's empty # caused by pointless update queries - if qs: + if not insert.is_empty: + qs = str(insert) + ctx = insert.get_context() + if self._batch: - self._batch.add_query(qs, query_values) + self._batch.add_query(qs, ctx) else: - execute(qs, query_values, self._consistency) + 
execute(qs, ctx, self._consistency) # delete any nulled columns self._delete_null_columns() diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 834e647924..ce8de5836b 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -414,6 +414,9 @@ def __unicode__(self): def __str__(self): return unicode(self).encode('utf-8') + def __repr__(self): + return self.__unicode__() + @property def _where(self): return 'WHERE {}'.format(' AND '.join([unicode(c) for c in self.where_clauses])) @@ -500,6 +503,10 @@ def add_assignment_clause(self, clause): self.context_counter += clause.get_context_size() self.assignments.append(clause) + @property + def is_empty(self): + return len(self.assignments) == 0 + def get_context(self): ctx = super(AssignmentStatement, self).get_context() for clause in self.assignments: From 042d0aee0e21d1c80ec9a61c83dce11de874b501 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 15:24:37 -0800 Subject: [PATCH 0523/3726] adding BaseCQLStatemtent support to query execution --- cqlengine/connection.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 6fec0d3d64..6e08a3580f 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -17,6 +17,7 @@ from contextlib import contextmanager from thrift.transport.TTransport import TTransportException +from cqlengine.statements import BaseCQLStatement LOG = logging.getLogger('cqlengine.cql') @@ -230,6 +231,9 @@ def execute(self, query, params, consistency_level=None): def execute(query, params=None, consistency_level=None): + if isinstance(query, BaseCQLStatement): + params = query.get_context() + query = str(query) params = params or {} if consistency_level is None: consistency_level = connection_pool._consistency From 8767fb18cacc1d33930c8281986db5964acb67d8 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 15:24:56 -0800 Subject: [PATCH 0524/3726] encapsulating raw or batch query 
execution into a querset method --- cqlengine/query.py | 58 ++++++++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 33 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 35e58459b4..55588546dc 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -16,7 +16,7 @@ #http://www.datastax.com/docs/1.1/references/cql/index from cqlengine.operators import InOperator, EqualsOperator, GreaterThanOperator, GreaterThanOrEqualOperator from cqlengine.operators import LessThanOperator, LessThanOrEqualOperator, BaseWhereOperator -from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause, InsertStatement +from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause, InsertStatement, BaseCQLStatement class QueryException(CQLEngineException): pass @@ -229,7 +229,10 @@ def __init__(self, batch_type=None, timestamp=None, consistency=None): self._consistency = consistency def add_query(self, query, params): - self.queries.append((query, params)) + if not isinstance(query, BaseCQLStatement): + raise CQLEngineException('only BaseCQLStatements can be added to a batch query') + # TODO: modify query's context id starting point + self.queries.append((str(query), query.get_context())) def consistency(self, consistency): self._consistency = consistency @@ -305,6 +308,12 @@ def __init__(self, model): def column_family_name(self): return self.model.column_family_name() + def _execute(self, q, params=None): + if self._batch: + return self._batch.add_query(q, params=params) + else: + return execute(q, params=params, consistency_level=self._consistency) + def __unicode__(self): return self._select_query() @@ -364,8 +373,7 @@ def _execute_query(self): if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") if self._result_cache is None: - query = self._select_query() - columns, 
self._result_cache = execute(unicode(query).encode('utf-8'), query.get_context(), self._consistency) + columns, self._result_cache = self._execute(self._select_query()) self._construct_result = self._get_result_constructor(columns) def _fill_result_cache_to_idx(self, idx): @@ -561,7 +569,7 @@ def count(self): if self._result_cache is None: query = self._select_query() query.count = True - _, result = execute(str(query), query.get_context()) + _, result = self._execute(query) return result[0][0] else: return len(self._result_cache) @@ -637,11 +645,7 @@ def delete(self): self.column_family_name, where=self._where ) - - if self._batch: - self._batch.add_query(str(dq), dq.get_context()) - else: - execute(str(dq), dq.get_context()) + self._execute(dq) def __eq__(self, q): if len(self._where) == len(q._where): @@ -825,6 +829,12 @@ def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None) self._ttl = ttl self._consistency = consistency + def _execute(self, q, params=None): + if self._batch: + return self._batch.add_query(q, params=params) + else: + return execute(q, params=params, consistency_level=self._consistency) + def batch(self, batch_obj): if batch_obj is not None and not isinstance(batch_obj, BatchQuery): raise CQLEngineException('batch_obj must be a BatchQuery instance or None') @@ -864,10 +874,7 @@ def _delete_null_columns(self): qs = ' '.join(qs) - if self._batch: - self._batch.add_query(qs, query_values) - else: - execute(qs, query_values) + self._execute(qs, query_values) def update(self): """ @@ -931,10 +938,7 @@ def update(self): # skip query execution if it's empty # caused by pointless update queries if qs: - if self._batch: - self._batch.add_query(qs, query_values) - else: - execute(qs, query_values, consistency_level=self._consistency) + self._execute(qs, query_values) self._delete_null_columns() @@ -990,13 +994,7 @@ def save(self): # skip query execution if it's empty # caused by pointless update queries if not insert.is_empty: - 
qs = str(insert) - ctx = insert.get_context() - - if self._batch: - self._batch.add_query(qs, ctx) - else: - execute(qs, ctx, self._consistency) + self._execute(insert) # delete any nulled columns self._delete_null_columns() @@ -1004,7 +1002,7 @@ def save(self): def delete(self): """ Deletes one instance """ if self.instance is None: - raise CQLEngineException("DML Query intance attribute is None") + raise CQLEngineException("DML Query instance attribute is None") ds = DeleteStatement(self.column_family_name) for name, col in self.model._primary_keys.items(): @@ -1013,12 +1011,6 @@ def delete(self): EqualsOperator(), col.to_database(getattr(self.instance, name)) )) - - qs = str(ds) - ctx = ds.get_context() - if self._batch: - self._batch.add_query(qs, ctx) - else: - execute(qs, ctx, self._consistency) + self._execute(ds) From 095ef4d89a2bf273f1675756607ecfd43bb3f619 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 15:48:19 -0800 Subject: [PATCH 0525/3726] moving delete null columns to delete statement --- cqlengine/query.py | 61 ++++++++++++++--------------------------- cqlengine/statements.py | 8 ++++++ 2 files changed, 29 insertions(+), 40 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 55588546dc..c7df62cb65 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -16,7 +16,7 @@ #http://www.datastax.com/docs/1.1/references/cql/index from cqlengine.operators import InOperator, EqualsOperator, GreaterThanOperator, GreaterThanOrEqualOperator from cqlengine.operators import LessThanOperator, LessThanOrEqualOperator, BaseWhereOperator -from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause, InsertStatement, BaseCQLStatement +from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause, InsertStatement, BaseCQLStatement, MapUpdateClause, MapDeleteClause class QueryException(CQLEngineException): pass @@ -310,9 
+310,9 @@ def column_family_name(self): def _execute(self, q, params=None): if self._batch: - return self._batch.add_query(q, params=params) + return self._batch.add_query(q) else: - return execute(q, params=params, consistency_level=self._consistency) + return execute(q, consistency_level=self._consistency) def __unicode__(self): return self._select_query() @@ -793,21 +793,11 @@ def update(self, **values): us.add_assignment_clause(AssignmentClause(name, col.to_database(val))) if us.assignments: - qs = str(us) - ctx = us.get_context() - if self._batch: - self._batch.add_query(qs, ctx) - else: - execute(qs, ctx, self._consistency) + self._execute(us) if nulled_columns: ds = DeleteStatement(self.column_family_name, fields=nulled_columns, where=self._where) - qs = str(ds) - ctx = ds.get_context() - if self._batch: - self._batch.add_query(qs, ctx) - else: - execute(qs, ctx, self._consistency) + self._execute(ds) class DMLQuery(object): @@ -845,36 +835,27 @@ def _delete_null_columns(self): """ executes a delete query to remove columns that have changed to null """ - values, field_names, field_ids, field_values, query_values = self._get_query_values() - - # delete nulled columns and removed map keys - qs = ['DELETE'] - query_values = {} - - del_statements = [] - for k,v in self.instance._values.items(): + ds = DeleteStatement(self.column_family_name) + deleted_fields = False + for _, v in self.instance._values.items(): col = v.column if v.deleted: - del_statements += ['"{}"'.format(col.db_field_name)] + ds.add_field(col.db_field_name) + deleted_fields = True elif isinstance(col, Map): - del_statements += col.get_delete_statement(v.value, v.previous_value, query_values) + uc = MapDeleteClause(col.db_field_name, v.value, v.previous_value) + if uc.get_context_size() > 0: + ds.add_field(uc) + deleted_fields = True - if del_statements: - qs += [', '.join(del_statements)] - - qs += ['FROM {}'.format(self.column_family_name)] - - qs += ['WHERE'] - where_statements = [] + if 
deleted_fields: for name, col in self.model._primary_keys.items(): - field_id = uuid4().hex - query_values[field_id] = field_values[name] - where_statements += ['"{}" = :{}'.format(col.db_field_name, field_id)] - qs += [' AND '.join(where_statements)] - - qs = ' '.join(qs) - - self._execute(qs, query_values) + ds.add_where_clause(WhereClause( + col.db_field_name, + EqualsOperator(), + col.to_database(getattr(self.instance, name)) + )) + self._execute(ds) def update(self): """ diff --git a/cqlengine/statements.py b/cqlengine/statements.py index ce8de5836b..4156c9ac6e 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -569,11 +569,19 @@ def __init__(self, table, fields=None, consistency=None, where=None): for field in fields or []: self.add_field(field) + def get_context(self): + ctx = super(DeleteStatement, self).get_context() + for field in self.fields: + field.update_context(ctx) + return ctx + def add_field(self, field): if isinstance(field, basestring): field = FieldDeleteClause(field) if not isinstance(field, BaseClause): raise StatementException("only instances of AssignmentClause can be added to statements") + field.set_context_id(self.context_counter) + self.context_counter += field.get_context_size() self.fields.append(field) def __unicode__(self): From 845a52a08196f472663d5bd8432056daaf1eb5ed Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 16:03:50 -0800 Subject: [PATCH 0526/3726] moving update query to update statement --- cqlengine/query.py | 95 ++++++++++++++--------------------------- cqlengine/statements.py | 35 +++++++++------ 2 files changed, 53 insertions(+), 77 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index c7df62cb65..f5c15e3a23 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -5,7 +5,7 @@ from time import time from uuid import uuid1 from cqlengine import BaseContainerColumn, BaseValueManager, Map, columns -from cqlengine.columns import Counter +from cqlengine.columns 
import Counter, List, Set from cqlengine.connection import connection_manager, execute, RowResult @@ -16,7 +16,7 @@ #http://www.datastax.com/docs/1.1/references/cql/index from cqlengine.operators import InOperator, EqualsOperator, GreaterThanOperator, GreaterThanOrEqualOperator from cqlengine.operators import LessThanOperator, LessThanOrEqualOperator, BaseWhereOperator -from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause, InsertStatement, BaseCQLStatement, MapUpdateClause, MapDeleteClause +from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause, InsertStatement, BaseCQLStatement, MapUpdateClause, MapDeleteClause, ListUpdateClause, SetUpdateClause class QueryException(CQLEngineException): pass @@ -868,83 +868,52 @@ def update(self): raise CQLEngineException("DML Query intance attribute is None") assert type(self.instance) == self.model - values, field_names, field_ids, field_values, query_values = self._get_query_values() - - qs = [] - qs += ["UPDATE {}".format(self.column_family_name)] - qs += ["SET"] - - set_statements = [] + statement = UpdateStatement(self.column_family_name, ttl=self._ttl) #get defined fields and their column names for name, col in self.model._columns.items(): if not col.is_primary_key: - val = values.get(name) + val = getattr(self.instance, name, None) + val_mgr = self.instance._values[name] # don't update something that is null if val is None: continue # don't update something if it hasn't changed - if not self.instance._values[name].changed and not isinstance(col, Counter): + if not val_mgr.changed and not isinstance(col, Counter): continue - # add the update statements - if isinstance(col, (BaseContainerColumn, Counter)): - #remove value from query values, the column will handle it - query_values.pop(field_ids.get(name), None) - - val_mgr = self.instance._values[name] - set_statements += col.get_update_statement(val, 
val_mgr.previous_value, query_values) - + if isinstance(col, List): + clause = ListUpdateClause(col.db_field_name, val, val_mgr.previous_value, column=col) + if clause.get_context_size() > 0: + statement.add_assignment_clause(clause) + elif isinstance(col, Map): + clause = MapUpdateClause(col.db_field_name, val, val_mgr.previous_value, column=col) + if clause.get_context_size() > 0: + statement.add_assignment_clause(clause) + elif isinstance(col, Set): + clause = SetUpdateClause(col.db_field_name, val, val_mgr.previous_value, column=col) + if clause.get_context_size() > 0: + statement.add_assignment_clause(clause) + elif isinstance(col, Counter): + raise NotImplementedError else: - set_statements += ['"{}" = :{}'.format(col.db_field_name, field_ids[col.db_field_name])] - qs += [', '.join(set_statements)] - - qs += ['WHERE'] + statement.add_assignment_clause(AssignmentClause( + col.db_field_name, + col.to_database(val) + )) - where_statements = [] - for name, col in self.model._primary_keys.items(): - where_statements += ['"{}" = :{}'.format(col.db_field_name, field_ids[col.db_field_name])] - - qs += [' AND '.join(where_statements)] - - if self._ttl: - qs += ["USING TTL {}".format(self._ttl)] - - # clear the qs if there are no set statements and this is not a counter model - if not set_statements and not self.instance._has_counter: - qs = [] - - qs = ' '.join(qs) - # skip query execution if it's empty - # caused by pointless update queries - if qs: - self._execute(qs, query_values) + if statement.get_context_size() > 0 or self.instance._has_counter: + for name, col in self.model._primary_keys.items(): + statement.add_where_clause(WhereClause( + col.db_field_name, + EqualsOperator(), + col.to_database(getattr(self.instance, name)) + )) + self._execute(statement) self._delete_null_columns() - # TODO: delete - def _get_query_values(self): - """ - returns all the data needed to do queries - """ - #organize data - value_pairs = [] - values = self.instance._as_dict() - - 
#get defined fields and their column names - for name, col in self.model._columns.items(): - val = values.get(name) - if col._val_is_null(val): continue - value_pairs += [(col.db_field_name, val)] - - #construct query string - field_names = zip(*value_pairs)[0] - field_ids = {n:uuid4().hex for n in field_names} - field_values = dict(value_pairs) - query_values = {field_ids[n]:field_values[n] for n in field_names} - return values, field_names, field_ids, field_values, query_values - def save(self): """ Creates / updates a row. diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 4156c9ac6e..ba13ea0b11 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -135,11 +135,15 @@ def insert_tuple(self): class ContainerUpdateClause(AssignmentClause): - def __init__(self, field, value, previous=None): + def __init__(self, field, value, previous=None, column=None): super(ContainerUpdateClause, self).__init__(field, value) self.previous = previous self._assignments = None self._analyzed = False + self._column = column + + def _to_database(self, val): + return self._column.to_database(val) if self._column else val def _analyze(self): raise NotImplementedError @@ -154,8 +158,8 @@ def update_context(self, ctx): class SetUpdateClause(ContainerUpdateClause): """ updates a set collection """ - def __init__(self, field, value, previous=None): - super(SetUpdateClause, self).__init__(field, value, previous) + def __init__(self, field, value, previous=None, column=None): + super(SetUpdateClause, self).__init__(field, value, previous, column=column) self._additions = None self._removals = None @@ -193,20 +197,20 @@ def update_context(self, ctx): if not self._analyzed: self._analyze() ctx_id = self.context_id if self._assignments: - ctx[str(ctx_id)] = self._assignments + ctx[str(ctx_id)] = self._to_database(self._assignments) ctx_id += 1 if self._additions: - ctx[str(ctx_id)] = self._additions + ctx[str(ctx_id)] = self._to_database(self._additions) ctx_id 
+= 1 if self._removals: - ctx[str(ctx_id)] = self._removals + ctx[str(ctx_id)] = self._to_database(self._removals) class ListUpdateClause(ContainerUpdateClause): """ updates a list collection """ - def __init__(self, field, value, previous=None): - super(ListUpdateClause, self).__init__(field, value, previous) + def __init__(self, field, value, previous=None, column=None): + super(ListUpdateClause, self).__init__(field, value, previous, column=column) self._append = None self._prepend = None @@ -235,16 +239,16 @@ def update_context(self, ctx): if not self._analyzed: self._analyze() ctx_id = self.context_id if self._assignments: - ctx[str(ctx_id)] = self._assignments + ctx[str(ctx_id)] = self._to_database(self._assignments) ctx_id += 1 if self._prepend: # CQL seems to prepend element at a time, starting # with the element at idx 0, we can either reverse # it here, or have it inserted in reverse - ctx[str(ctx_id)] = list(reversed(self._prepend)) + ctx[str(ctx_id)] = self._to_database(list(reversed(self._prepend))) ctx_id += 1 if self._append: - ctx[str(ctx_id)] = self._append + ctx[str(ctx_id)] = self._to_database(self._append) def _analyze(self): """ works out the updates to be performed """ @@ -292,8 +296,8 @@ def _analyze(self): class MapUpdateClause(ContainerUpdateClause): """ updates a map collection """ - def __init__(self, field, value, previous=None): - super(MapUpdateClause, self).__init__(field, value, previous) + def __init__(self, field, value, previous=None, column=None): + super(MapUpdateClause, self).__init__(field, value, previous, column=column) self._updates = None self.previous = self.previous or {} @@ -310,7 +314,7 @@ def update_context(self, ctx): ctx_id = self.context_id for key in self._updates or []: ctx[str(ctx_id)] = key - ctx[str(ctx_id + 1)] = self.value.get(key) + ctx[str(ctx_id + 1)] = self._to_database(self.value.get(key)) ctx_id += 2 def __unicode__(self): @@ -408,6 +412,9 @@ def get_context(self): clause.update_context(ctx) return ctx 
+ def get_context_size(self): + return len(self.get_context()) + def __unicode__(self): raise NotImplementedError From e3e7c18376cac94b43fb8278cebeabe751b612a1 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 16:12:54 -0800 Subject: [PATCH 0527/3726] adding counter column update clause and supporting unit tests --- cqlengine/statements.py | 19 +++++++++++ .../statements/test_assignment_clauses.py | 33 ++++++++++++++++++- 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index ba13ea0b11..c1de6414e2 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -329,6 +329,25 @@ def __unicode__(self): return ', '.join(qs) +class CounterUpdateClause(ContainerUpdateClause): + + def __init__(self, field, value, previous=None, column=None): + super(CounterUpdateClause, self).__init__(field, value, previous, column) + self.previous = self.previous or 0 + + def get_context_size(self): + return 1 if self.value != self.previous else 0 + + def update_context(self, ctx): + if self.value != self.previous: + ctx[str(self.context_id)] = self._to_database(abs(self.value - self.previous)) + + def __unicode__(self): + delta = self.value - self.previous + sign = '-' if delta < 0 else '+' + return '"{0}" = "{0}" {1} :{2}'.format(self.field, sign, self.context_id) + + class BaseDeleteClause(BaseClause): pass diff --git a/cqlengine/tests/statements/test_assignment_clauses.py b/cqlengine/tests/statements/test_assignment_clauses.py index 03d2a5ab0f..1b2e430e9a 100644 --- a/cqlengine/tests/statements/test_assignment_clauses.py +++ b/cqlengine/tests/statements/test_assignment_clauses.py @@ -1,5 +1,5 @@ from unittest import TestCase -from cqlengine.statements import AssignmentClause, SetUpdateClause, ListUpdateClause, MapUpdateClause, MapDeleteClause, FieldDeleteClause +from cqlengine.statements import AssignmentClause, SetUpdateClause, ListUpdateClause, MapUpdateClause, MapDeleteClause, 
FieldDeleteClause, CounterUpdateClause class AssignmentClauseTests(TestCase): @@ -268,6 +268,37 @@ def test_nulled_columns_arent_included(self): self.assertNotIn(1, c._updates) +class CounterUpdateTests(TestCase): + + def test_positive_update(self): + c = CounterUpdateClause('a', 5, 3) + c.set_context_id(5) + + self.assertEqual(c.get_context_size(), 1) + self.assertEqual(str(c), '"a" = "a" + :5') + + ctx = {} + c.update_context(ctx) + self.assertEqual(ctx, {'5': 2}) + + def test_negative_update(self): + c = CounterUpdateClause('a', 4, 7) + c.set_context_id(3) + + self.assertEqual(c.get_context_size(), 1) + self.assertEqual(str(c), '"a" = "a" - :3') + + ctx = {} + c.update_context(ctx) + self.assertEqual(ctx, {'3': 3}) + + def noop_update(self): + c = CounterUpdateClause('a', 5, 5) + c.set_context_id(5) + + self.assertEqual(c.get_context_size(), 0) + + class MapDeleteTests(TestCase): def test_update(self): From e66eecf28b1e1cd91f16f3c74fff1da5c8036179 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 16:16:27 -0800 Subject: [PATCH 0528/3726] adding counter column support to update and condensing the logic --- cqlengine/query.py | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index f5c15e3a23..b924cae6c2 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -16,7 +16,7 @@ #http://www.datastax.com/docs/1.1/references/cql/index from cqlengine.operators import InOperator, EqualsOperator, GreaterThanOperator, GreaterThanOrEqualOperator from cqlengine.operators import LessThanOperator, LessThanOrEqualOperator, BaseWhereOperator -from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause, InsertStatement, BaseCQLStatement, MapUpdateClause, MapDeleteClause, ListUpdateClause, SetUpdateClause +from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause, 
InsertStatement, BaseCQLStatement, MapUpdateClause, MapDeleteClause, ListUpdateClause, SetUpdateClause, CounterUpdateClause class QueryException(CQLEngineException): pass @@ -883,20 +883,18 @@ def update(self): if not val_mgr.changed and not isinstance(col, Counter): continue - if isinstance(col, List): - clause = ListUpdateClause(col.db_field_name, val, val_mgr.previous_value, column=col) - if clause.get_context_size() > 0: - statement.add_assignment_clause(clause) - elif isinstance(col, Map): - clause = MapUpdateClause(col.db_field_name, val, val_mgr.previous_value, column=col) - if clause.get_context_size() > 0: - statement.add_assignment_clause(clause) - elif isinstance(col, Set): - clause = SetUpdateClause(col.db_field_name, val, val_mgr.previous_value, column=col) + if isinstance(col, (BaseContainerColumn, Counter)): + # get appropriate clause + if isinstance(col, List): klass = ListUpdateClause + elif isinstance(col, Map): klass = MapUpdateClause + elif isinstance(col, Set): klass = SetUpdateClause + elif isinstance(col, Counter): klass = CounterUpdateClause + else: raise RuntimeError + + # do the stuff + clause = klass(col.db_field_name, val, val_mgr.previous_value, column=col) if clause.get_context_size() > 0: statement.add_assignment_clause(clause) - elif isinstance(col, Counter): - raise NotImplementedError else: statement.add_assignment_clause(AssignmentClause( col.db_field_name, From 7a4e2be45ef59d98407d22b9686c3e871f5ddca3 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 16:23:39 -0800 Subject: [PATCH 0529/3726] fixing map context updates --- cqlengine/statements.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index c1de6414e2..cfa3eb5fbf 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -313,8 +313,9 @@ def update_context(self, ctx): if not self._analyzed: self._analyze() ctx_id = self.context_id for key in self._updates or []: - 
ctx[str(ctx_id)] = key - ctx[str(ctx_id + 1)] = self._to_database(self.value.get(key)) + val = self.value.get(key) + ctx[str(ctx_id)] = self._column.key_col.to_database(key) if self._column else key + ctx[str(ctx_id + 1)] = self._column.value_col.to_database(val) if self._column else val ctx_id += 2 def __unicode__(self): From b41a1d6d63cc63bfbb537959cfa8476f98d91097 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 16:31:26 -0800 Subject: [PATCH 0530/3726] getting noop counter column updates working properly --- cqlengine/statements.py | 5 ++--- cqlengine/tests/statements/test_assignment_clauses.py | 7 ++++++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index cfa3eb5fbf..650d237b7c 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -337,11 +337,10 @@ def __init__(self, field, value, previous=None, column=None): self.previous = self.previous or 0 def get_context_size(self): - return 1 if self.value != self.previous else 0 + return 1 def update_context(self, ctx): - if self.value != self.previous: - ctx[str(self.context_id)] = self._to_database(abs(self.value - self.previous)) + ctx[str(self.context_id)] = self._to_database(abs(self.value - self.previous)) def __unicode__(self): delta = self.value - self.previous diff --git a/cqlengine/tests/statements/test_assignment_clauses.py b/cqlengine/tests/statements/test_assignment_clauses.py index 1b2e430e9a..f7fbda38fb 100644 --- a/cqlengine/tests/statements/test_assignment_clauses.py +++ b/cqlengine/tests/statements/test_assignment_clauses.py @@ -296,7 +296,12 @@ def noop_update(self): c = CounterUpdateClause('a', 5, 5) c.set_context_id(5) - self.assertEqual(c.get_context_size(), 0) + self.assertEqual(c.get_context_size(), 1) + self.assertEqual(str(c), '"a" = "a" + :5') + + ctx = {} + c.update_context(ctx) + self.assertEqual(ctx, {'5': 0}) class MapDeleteTests(TestCase): From e7107c3c0c656139e0cc01b3d3fd7ed38e9ec07b 
Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 16:47:56 -0800 Subject: [PATCH 0531/3726] adding support for changing the root context id of a statement --- cqlengine/statements.py | 22 ++++++++++++++++++- .../tests/statements/test_delete_statement.py | 11 +++++++++- .../tests/statements/test_insert_statement.py | 13 +++++++++++ .../tests/statements/test_select_statement.py | 11 ++++++++++ .../tests/statements/test_update_statement.py | 9 ++++++++ 5 files changed, 64 insertions(+), 2 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 650d237b7c..dd367a0f88 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -403,7 +403,8 @@ def __init__(self, table, consistency=None, where=None): super(BaseCQLStatement, self).__init__() self.table = table self.consistency = consistency - self.context_counter = 0 + self.context_id = 0 + self.context_counter = self.context_id self.where_clauses = [] for clause in where or []: @@ -434,6 +435,13 @@ def get_context(self): def get_context_size(self): return len(self.get_context()) + def update_context_id(self, i): + self.context_id = i + self.context_counter = self.context_id + for clause in self.where_clauses: + clause.set_context_id(self.context_counter) + self.context_counter += clause.get_context_size() + def __unicode__(self): raise NotImplementedError @@ -517,6 +525,12 @@ def __init__(self, for assignment in assignments or []: self.add_assignment_clause(assignment) + def update_context_id(self, i): + super(AssignmentStatement, self).update_context_id(i) + for assignment in self.assignments: + assignment.set_context_id(self.context_counter) + self.context_counter += assignment.get_context_size() + def add_assignment_clause(self, clause): """ adds an assignment clause to this statement @@ -595,6 +609,12 @@ def __init__(self, table, fields=None, consistency=None, where=None): for field in fields or []: self.add_field(field) + def update_context_id(self, i): + 
super(DeleteStatement, self).update_context_id(i) + for field in self.fields: + field.set_context_id(self.context_counter) + self.context_counter += field.get_context_size() + def get_context(self): ctx = super(DeleteStatement, self).get_context() for field in self.fields: diff --git a/cqlengine/tests/statements/test_delete_statement.py b/cqlengine/tests/statements/test_delete_statement.py index b1055ea4fb..9ac9554383 100644 --- a/cqlengine/tests/statements/test_delete_statement.py +++ b/cqlengine/tests/statements/test_delete_statement.py @@ -1,5 +1,5 @@ from unittest import TestCase -from cqlengine.statements import DeleteStatement, WhereClause +from cqlengine.statements import DeleteStatement, WhereClause, MapDeleteClause from cqlengine.operators import * @@ -33,6 +33,15 @@ def test_where_clause_rendering(self): ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) self.assertEqual(unicode(ds), 'DELETE FROM table WHERE "a" = :0', unicode(ds)) + def test_context_update(self): + ds = DeleteStatement('table', None) + ds.add_field(MapDeleteClause('d', {1: 2}, {1:2, 3: 4})) + ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) + + ds.update_context_id(7) + self.assertEqual(unicode(ds), 'DELETE "d"[:8] FROM table WHERE "a" = :7') + self.assertEqual(ds.get_context(), {'7': 'b', '8': 3}) + def test_context(self): ds = DeleteStatement('table', None) ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) diff --git a/cqlengine/tests/statements/test_insert_statement.py b/cqlengine/tests/statements/test_insert_statement.py index 51280a314f..5e22d7d99e 100644 --- a/cqlengine/tests/statements/test_insert_statement.py +++ b/cqlengine/tests/statements/test_insert_statement.py @@ -20,6 +20,19 @@ def test_statement(self): 'INSERT INTO table ("a", "c") VALUES (:0, :1)' ) + def test_context_update(self): + ist = InsertStatement('table', None) + ist.add_assignment_clause(AssignmentClause('a', 'b')) + ist.add_assignment_clause(AssignmentClause('c', 'd')) + + 
ist.update_context_id(4) + self.assertEqual( + unicode(ist), + 'INSERT INTO table ("a", "c") VALUES (:4, :5)' + ) + ctx = ist.get_context() + self.assertEqual(ctx, {'4': 'b', '5': 'd'}) + def test_additional_rendering(self): ist = InsertStatement('table', ttl=60) ist.add_assignment_clause(AssignmentClause('a', 'b')) diff --git a/cqlengine/tests/statements/test_select_statement.py b/cqlengine/tests/statements/test_select_statement.py index d59c8eb77c..f358c6ae5a 100644 --- a/cqlengine/tests/statements/test_select_statement.py +++ b/cqlengine/tests/statements/test_select_statement.py @@ -44,6 +44,17 @@ def test_context(self): ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) self.assertEqual(ss.get_context(), {'0': 'b'}) + def test_context_id_update(self): + """ tests that the right things happen the the context id """ + ss = SelectStatement('table') + ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) + self.assertEqual(ss.get_context(), {'0': 'b'}) + self.assertEqual(str(ss), 'SELECT * FROM table WHERE "a" = :0') + + ss.update_context_id(5) + self.assertEqual(ss.get_context(), {'5': 'b'}) + self.assertEqual(str(ss), 'SELECT * FROM table WHERE "a" = :5') + def test_additional_rendering(self): ss = SelectStatement( 'table', diff --git a/cqlengine/tests/statements/test_update_statement.py b/cqlengine/tests/statements/test_update_statement.py index 0945d2341e..86dbc8be44 100644 --- a/cqlengine/tests/statements/test_update_statement.py +++ b/cqlengine/tests/statements/test_update_statement.py @@ -25,6 +25,15 @@ def test_context(self): us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertEqual(us.get_context(), {'0': 'b', '1': 'd', '2': 'x'}) + def test_context_update(self): + us = UpdateStatement('table') + us.add_assignment_clause(AssignmentClause('a', 'b')) + us.add_assignment_clause(AssignmentClause('c', 'd')) + us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) + us.update_context_id(3) + 
self.assertEqual(unicode(us), 'UPDATE table SET "a" = :4, "c" = :5 WHERE "a" = :3') + self.assertEqual(us.get_context(), {'4': 'b', '5': 'd', '3': 'x'}) + def test_additional_rendering(self): us = UpdateStatement('table', ttl=60) us.add_assignment_clause(AssignmentClause('a', 'b')) From fb502a023fd0f348e67ecadbaff29fd675fa3b5e Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 16:51:26 -0800 Subject: [PATCH 0532/3726] removing parameters from batch and execute methods --- cqlengine/query.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index b924cae6c2..04379bb0b9 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -228,7 +228,7 @@ def __init__(self, batch_type=None, timestamp=None, consistency=None): self.timestamp = timestamp self._consistency = consistency - def add_query(self, query, params): + def add_query(self, query): if not isinstance(query, BaseCQLStatement): raise CQLEngineException('only BaseCQLStatements can be added to a batch query') # TODO: modify query's context id starting point @@ -308,7 +308,7 @@ def __init__(self, model): def column_family_name(self): return self.model.column_family_name() - def _execute(self, q, params=None): + def _execute(self, q): if self._batch: return self._batch.add_query(q) else: @@ -819,11 +819,11 @@ def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None) self._ttl = ttl self._consistency = consistency - def _execute(self, q, params=None): + def _execute(self, q): if self._batch: - return self._batch.add_query(q, params=params) + return self._batch.add_query(q) else: - return execute(q, params=params, consistency_level=self._consistency) + return execute(q, consistency_level=self._consistency) def batch(self, batch_obj): if batch_obj is not None and not isinstance(batch_obj, BatchQuery): From 5a976717b5446ea797cd77920f396ab039f3447d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 
2013 16:54:39 -0800 Subject: [PATCH 0533/3726] reworking batch to work with the new statement objects --- cqlengine/query.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 04379bb0b9..273f338877 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -231,8 +231,7 @@ def __init__(self, batch_type=None, timestamp=None, consistency=None): def add_query(self, query): if not isinstance(query, BaseCQLStatement): raise CQLEngineException('only BaseCQLStatements can be added to a batch query') - # TODO: modify query's context id starting point - self.queries.append((str(query), query.get_context())) + self.queries.append(query) def consistency(self, consistency): self._consistency = consistency @@ -250,9 +249,13 @@ def execute(self): query_list = [opener] parameters = {} - for query, params in self.queries: - query_list.append(' ' + query) - parameters.update(params) + ctx_counter = 0 + for query in self.queries: + query.update_context_id(ctx_counter) + ctx = query.get_context() + ctx_counter += len(ctx) + query_list.append(' ' + str(query)) + parameters.update(ctx) query_list.append('APPLY BATCH;') From fe6359e4d0bf95d7b75f11bf832c1f061083b5df Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Sun, 3 Nov 2013 16:57:03 -0800 Subject: [PATCH 0534/3726] removing old code --- cqlengine/query.py | 158 ++------------------------------------------- 1 file changed, 5 insertions(+), 153 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 273f338877..04afff145e 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -1,16 +1,12 @@ import copy from datetime import datetime -from uuid import uuid4 -from hashlib import md5 -from time import time -from uuid import uuid1 -from cqlengine import BaseContainerColumn, BaseValueManager, Map, columns +from cqlengine import BaseContainerColumn, Map, columns from cqlengine.columns import Counter, List, Set -from cqlengine.connection 
import connection_manager, execute, RowResult +from cqlengine.connection import execute, RowResult from cqlengine.exceptions import CQLEngineException, ValidationError -from cqlengine.functions import QueryValue, Token, BaseQueryFunction +from cqlengine.functions import Token, BaseQueryFunction #CQL 3 reference: #http://www.datastax.com/docs/1.1/references/cql/index @@ -24,150 +20,6 @@ class DoesNotExist(QueryException): pass class MultipleObjectsReturned(QueryException): pass -# class QueryOperatorException(QueryException): pass -# -# -# class QueryOperator(object): -# # The symbol that identifies this operator in filter kwargs -# # ie: colname__ -# symbol = None -# -# # The comparator symbol this operator uses in cql -# cql_symbol = None -# -# QUERY_VALUE_WRAPPER = QueryValue -# -# def __init__(self, column, value): -# self.column = column -# self.value = value -# -# if isinstance(value, QueryValue): -# self.query_value = value -# else: -# self.query_value = self.QUERY_VALUE_WRAPPER(value) -# -# #perform validation on this operator -# self.validate_operator() -# self.validate_value() -# -# @property -# def cql(self): -# """ -# Returns this operator's portion of the WHERE clause -# """ -# return '{} {} {}'.format(self.column.cql, self.cql_symbol, self.query_value.cql) -# -# def validate_operator(self): -# """ -# Checks that this operator can be used on the column provided -# """ -# if self.symbol is None: -# raise QueryOperatorException( -# "{} is not a valid operator, use one with 'symbol' defined".format( -# self.__class__.__name__ -# ) -# ) -# if self.cql_symbol is None: -# raise QueryOperatorException( -# "{} is not a valid operator, use one with 'cql_symbol' defined".format( -# self.__class__.__name__ -# ) -# ) -# -# def validate_value(self): -# """ -# Checks that the compare value works with this operator -# -# Doesn't do anything by default -# """ -# pass -# -# def get_dict(self): -# """ -# Returns this operators contribution to the cql.query arg 
dictionanry -# -# ie: if this column's name is colname, and the identifier is colval, -# this should return the dict: {'colval':} -# SELECT * FROM column_family WHERE colname=:colval -# """ -# return self.query_value.get_dict(self.column) -# -# @classmethod -# def get_operator(cls, symbol): -# if not hasattr(cls, 'opmap'): -# QueryOperator.opmap = {} -# def _recurse(klass): -# if klass.symbol: -# QueryOperator.opmap[klass.symbol.upper()] = klass -# for subklass in klass.__subclasses__(): -# _recurse(subklass) -# pass -# _recurse(QueryOperator) -# try: -# return QueryOperator.opmap[symbol.upper()] -# except KeyError: -# raise QueryOperatorException("{} doesn't map to a QueryOperator".format(symbol)) -# -# # equality operator, used by tests -# -# def __eq__(self, op): -# return self.__class__ is op.__class__ and \ -# self.column.db_field_name == op.column.db_field_name and \ -# self.value == op.value -# -# def __ne__(self, op): -# return not (self == op) -# -# def __hash__(self): -# return hash(self.column.db_field_name) ^ hash(self.value) -# -# -# class EqualsOperator(QueryOperator): -# symbol = 'EQ' -# cql_symbol = '=' -# -# -# class IterableQueryValue(QueryValue): -# def __init__(self, value): -# try: -# super(IterableQueryValue, self).__init__(value, [uuid4().hex for i in value]) -# except TypeError: -# raise QueryException("in operator arguments must be iterable, {} found".format(value)) -# -# def get_dict(self, column): -# return dict((i, column.to_database(v)) for (i, v) in zip(self.identifier, self.value)) -# -# def get_cql(self): -# return '({})'.format(', '.join(':{}'.format(i) for i in self.identifier)) -# -# -# class InOperator(EqualsOperator): -# symbol = 'IN' -# cql_symbol = 'IN' -# -# QUERY_VALUE_WRAPPER = IterableQueryValue -# -# -# class GreaterThanOperator(QueryOperator): -# symbol = "GT" -# cql_symbol = '>' -# -# -# class GreaterThanOrEqualOperator(QueryOperator): -# symbol = "GTE" -# cql_symbol = '>=' -# -# -# class 
LessThanOperator(QueryOperator): -# symbol = "LT" -# cql_symbol = '<' -# -# -# class LessThanOrEqualOperator(QueryOperator): -# symbol = "LTE" -# cql_symbol = '<=' -# - class AbstractQueryableColumn(object): """ exposes cql query operators through pythons @@ -318,7 +170,7 @@ def _execute(self, q): return execute(q, consistency_level=self._consistency) def __unicode__(self): - return self._select_query() + return unicode(self._select_query()) def __str__(self): return str(self.__unicode__()) @@ -328,7 +180,7 @@ def __call__(self, *args, **kwargs): def __deepcopy__(self, memo): clone = self.__class__(self.model) - for k,v in self.__dict__.items(): + for k, v in self.__dict__.items(): if k in ['_con', '_cur', '_result_cache', '_result_idx']: # don't clone these clone.__dict__[k] = None elif k == '_batch': From cfc77117939fd8701ac4b454b2e080c703a214c3 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 4 Nov 2013 09:58:43 -0800 Subject: [PATCH 0535/3726] fixing bug in set update that would prevent the last item in a set from being removed --- cqlengine/statements.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index dd367a0f88..b3b95656a2 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -181,7 +181,7 @@ def _analyze(self): """ works out the updates to be performed """ if self.value is None or self.value == self.previous: pass - elif self.previous is None or not any({v in self.previous for v in self.value}): + elif self.previous is None: self._assignments = self.value else: # partial update time From 8210bacac77c33cd6cfc7038e3a8a74f14789fea Mon Sep 17 00:00:00 2001 From: Dvir Volk Date: Tue, 12 Nov 2013 13:03:02 +0200 Subject: [PATCH 0536/3726] Changed List column's default to be list and not set --- cqlengine/columns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 09f6a9fa3a..ef47f284ef 100644 --- 
a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -647,7 +647,7 @@ def __str__(self): def __nonzero__(self): return bool(self.value) - def __init__(self, value_type, default=set, **kwargs): + def __init__(self, value_type, default=list, **kwargs): return super(List, self).__init__(value_type=value_type, default=default, **kwargs) def validate(self, value): From a080aff3a73351d37126b14eef606061b445aa37 Mon Sep 17 00:00:00 2001 From: Kai Lautaportti Date: Fri, 7 Jun 2013 19:18:07 +0300 Subject: [PATCH 0537/3726] Fixed a bug when NetworkTopologyStrategy is used. Although the Cassandra documentation implies that the `replication_factor` parameter would be ignored in this case its presence will cause an error when creating a keyspace using NetworkTopologyStrategy. --- cqlengine/management.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cqlengine/management.py b/cqlengine/management.py index e05c7fc265..b75412c70c 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -37,6 +37,12 @@ def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, } replication_map.update(replication_values) + if strategy_class.lower() != 'simplestrategy': + # Although the Cassandra documentation states for `replication_factor` + # that it is "Required if class is SimpleStrategy; otherwise, + # not used." we get an error if it is present. 
+ replication_map.pop('replication_factor', None) + query = """ CREATE KEYSPACE {} WITH REPLICATION = {} From 4b3edc03d07b3258a38ed070a6fc982b93d288ed Mon Sep 17 00:00:00 2001 From: Kai Lautaportti Date: Tue, 26 Nov 2013 16:48:14 +0200 Subject: [PATCH 0538/3726] Test demonstrating regression in querying with dates --- cqlengine/tests/model/test_model_io.py | 31 ++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index 91d4591246..82abef74ba 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -1,5 +1,6 @@ from uuid import uuid4 import random +from datetime import date from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.management import create_table @@ -238,3 +239,33 @@ def test_reserved_cql_words_can_be_used_as_column_names(self): assert model1.insert == model2[0].insert +class TestQueryModel(Model): + test_id = columns.UUID(primary_key=True, default=uuid4) + date = columns.Date(primary_key=True) + description = columns.Text() + + +class TestQuerying(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestQuerying, cls).setUpClass() + delete_table(TestQueryModel) + create_table(TestQueryModel) + + @classmethod + def tearDownClass(cls): + super(TestQuerying, cls).tearDownClass() + delete_table(TestQueryModel) + + def test_query_with_date(self): + uid = uuid4() + day = date(2013, 11, 26) + TestQueryModel.create(test_id=uid, date=day, description=u'foo') + + inst = TestQueryModel.filter( + TestQueryModel.test_id == uid, + TestQueryModel.date == day).limit(1).first() + + assert inst.test_id == uid + assert inst.date == day From 225f09212b88fe07e4ce9564fcff682eb84b051c Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 26 Nov 2013 07:30:34 -0800 Subject: [PATCH 0539/3726] removing double replication strategy fix --- cqlengine/management.py | 6 ------ 1 file changed, 6 deletions(-) 
diff --git a/cqlengine/management.py b/cqlengine/management.py index b10fd4fc59..81771c48b9 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -42,12 +42,6 @@ def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, # not used." we get an error if it is present. replication_map.pop('replication_factor', None) - if strategy_class.lower() != 'simplestrategy': - # Although the Cassandra documentation states for `replication_factor` - # that it is "Required if class is SimpleStrategy; otherwise, - # not used." we get an error if it is present. - replication_map.pop('replication_factor', None) - query = """ CREATE KEYSPACE {} WITH REPLICATION = {} From cfc076749fd874b974ef6fac1d0508ab2ef41660 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 26 Nov 2013 08:12:07 -0800 Subject: [PATCH 0540/3726] fixing polymorphic test --- cqlengine/tests/model/test_polymorphism.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/tests/model/test_polymorphism.py b/cqlengine/tests/model/test_polymorphism.py index 00078bcb50..4528833517 100644 --- a/cqlengine/tests/model/test_polymorphism.py +++ b/cqlengine/tests/model/test_polymorphism.py @@ -45,8 +45,8 @@ class M1(Base): assert Base._is_polymorphic_base assert not M1._is_polymorphic_base - assert Base._polymorphic_column == Base.type1 - assert M1._polymorphic_column == M1.type1 + assert Base._polymorphic_column is Base._columns['type1'] + assert M1._polymorphic_column is M1._columns['type1'] assert Base._polymorphic_column_name == 'type1' assert M1._polymorphic_column_name == 'type1' From 556a0d776b5f45767fbab63cd60e99aeebc7582d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 26 Nov 2013 08:12:25 -0800 Subject: [PATCH 0541/3726] fixing to_database behavior for functions --- cqlengine/functions.py | 18 ++++++++++++------ cqlengine/query.py | 20 +++++++++++++------- 2 files changed, 25 insertions(+), 13 deletions(-) diff --git 
a/cqlengine/functions.py b/cqlengine/functions.py index ceb6a78be9..5796ba4ae4 100644 --- a/cqlengine/functions.py +++ b/cqlengine/functions.py @@ -53,10 +53,13 @@ def __init__(self, value): raise ValidationError('datetime instance is required') super(MinTimeUUID, self).__init__(value) - def update_context(self, ctx): - epoch = datetime(1970, 1, 1, tzinfo=self.value.tzinfo) + def to_database(self, val): + epoch = datetime(1970, 1, 1, tzinfo=val.tzinfo) offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 - ctx[str(self.context_id)] = long(((self.value - epoch).total_seconds() - offset) * 1000) + return long(((val - epoch).total_seconds() - offset) * 1000) + + def update_context(self, ctx): + ctx[str(self.context_id)] = self.to_database(self.value) class MaxTimeUUID(BaseQueryFunction): @@ -77,10 +80,13 @@ def __init__(self, value): raise ValidationError('datetime instance is required') super(MaxTimeUUID, self).__init__(value) - def update_context(self, ctx): - epoch = datetime(1970, 1, 1, tzinfo=self.value.tzinfo) + def to_database(self, val): + epoch = datetime(1970, 1, 1, tzinfo=val.tzinfo) offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 - ctx[str(self.context_id)] = long(((self.value - epoch).total_seconds() - offset) * 1000) + return long(((val - epoch).total_seconds() - offset) * 1000) + + def update_context(self, ctx): + ctx[str(self.context_id)] = self.to_database(self.value) class Token(BaseQueryFunction): diff --git a/cqlengine/query.py b/cqlengine/query.py index 04afff145e..297c2b18b4 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -6,7 +6,7 @@ from cqlengine.connection import execute, RowResult from cqlengine.exceptions import CQLEngineException, ValidationError -from cqlengine.functions import Token, BaseQueryFunction +from cqlengine.functions import Token, BaseQueryFunction, QueryValue #CQL 3 reference: #http://www.datastax.com/docs/1.1/references/cql/index @@ -35,28 +35,34 @@ def 
__unicode__(self): def __str__(self): return str(unicode(self)) + def _to_database(self, val): + if isinstance(val, QueryValue): + return val + else: + return self._get_column().to_database(val) + def in_(self, item): """ Returns an in operator - used in where you'd typically want to use python's `in` operator + used where you'd typically want to use python's `in` operator """ return WhereClause(unicode(self), InOperator(), item) def __eq__(self, other): - return WhereClause(unicode(self), EqualsOperator(), other) + return WhereClause(unicode(self), EqualsOperator(), self._to_database(other)) def __gt__(self, other): - return WhereClause(unicode(self), GreaterThanOperator(), other) + return WhereClause(unicode(self), GreaterThanOperator(), self._to_database(other)) def __ge__(self, other): - return WhereClause(unicode(self), GreaterThanOrEqualOperator(), other) + return WhereClause(unicode(self), GreaterThanOrEqualOperator(), self._to_database(other)) def __lt__(self, other): - return WhereClause(unicode(self), LessThanOperator(), other) + return WhereClause(unicode(self), LessThanOperator(), self._to_database(other)) def __le__(self, other): - return WhereClause(unicode(self), LessThanOrEqualOperator(), other) + return WhereClause(unicode(self), LessThanOrEqualOperator(), self._to_database(other)) class BatchType(object): From 33023837d2d2f17e254c80c447d77f06869334e5 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 26 Nov 2013 08:15:24 -0800 Subject: [PATCH 0542/3726] bumping version and updating changelog --- changelog | 4 ++++ cqlengine/VERSION | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/changelog b/changelog index 4aad4980c0..28ce74dff7 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,9 @@ CHANGELOG +0.9.2 +* fixing create keyspace with network topology strategy +* fixing regression with query expressions + 0.9 * adding update method * adding support for ttls diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 
f374f6662e..2003b639c4 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.9.1 +0.9.2 From d315b0f1f743ed7e7ff4e86a2bdff2a415a4c523 Mon Sep 17 00:00:00 2001 From: Russell Haering Date: Sun, 1 Dec 2013 18:33:49 -0800 Subject: [PATCH 0543/3726] validate Token and pk__token filter parameters --- cqlengine/query.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/cqlengine/query.py b/cqlengine/query.py index 297c2b18b4..ebb97c51c5 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -355,11 +355,22 @@ def filter(self, *args, **kwargs): column = self.model._get_column(col_name) except KeyError: if col_name == 'pk__token': + if not isinstance(val, Token): + raise QueryException("Virtual column 'pk__token' may only be compared to Token() values") column = columns._PartitionKeysToken(self.model) quote_field = False else: raise QueryException("Can't resolve column name: '{}'".format(col_name)) + if isinstance(val, Token): + if col_name != 'pk__token': + raise QueryException("Token() values may only be compared to the 'pk__token' virtual column") + partition_columns = column.partition_columns + if len(partition_columns) != len(val.value): + raise QueryException( + 'Token() received {} arguments but model has {} partition keys'.format( + len(partition_columns), len(val.value))) + #get query operator, or use equals if not supplied operator_class = BaseWhereOperator.get_operator(col_op or 'EQ') operator = operator_class() From 2a1cb90c9a5077b9617f92e4b67fdd3ab9fc5373 Mon Sep 17 00:00:00 2001 From: Russell Haering Date: Sun, 1 Dec 2013 19:05:09 -0800 Subject: [PATCH 0544/3726] update validation of Token use at query-execution time --- cqlengine/query.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index ebb97c51c5..7608bbf834 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -562,17 +562,15 @@ def _validate_select_where(self): """ Checks that a filterset will not 
create invalid select statement """ #check that there's either a = or IN relationship with a primary key or indexed field equal_ops = [self.model._columns.get(w.field) for w in self._where if isinstance(w.operator, EqualsOperator)] - token_ops = [self.model._columns.get(w.field) for w in self._where if isinstance(w.operator, Token)] - if not any([w.primary_key or w.index for w in equal_ops]) and not token_ops: + token_comparison = any([w for w in self._where if isinstance(w.value, Token)]) + if not any([w.primary_key or w.index for w in equal_ops]) and not token_comparison: raise QueryException('Where clauses require either a "=" or "IN" comparison with either a primary key or indexed field') if not self._allow_filtering: #if the query is not on an indexed field if not any([w.index for w in equal_ops]): - if not any([w.partition_key for w in equal_ops]) and not token_ops: + if not any([w.partition_key for w in equal_ops]) and not token_comparison: raise QueryException('Filtering on a clustering key without a partition key is not allowed unless allow_filtering() is called on the querset') - if any(not w.partition_key for w in token_ops): - raise QueryException('The token() function is only supported on the partition key') def _select_fields(self): if self._defer_fields or self._only_fields: From 5a5e7ef5cdc1fb09da29bbe597ccf04deb0fc898 Mon Sep 17 00:00:00 2001 From: Russell Haering Date: Sun, 1 Dec 2013 19:40:19 -0800 Subject: [PATCH 0545/3726] pass partition columns to Token for use serializing values --- cqlengine/query.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cqlengine/query.py b/cqlengine/query.py index 7608bbf834..1b9ea24489 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -370,6 +370,7 @@ def filter(self, *args, **kwargs): raise QueryException( 'Token() received {} arguments but model has {} partition keys'.format( len(partition_columns), len(val.value))) + val.set_columns(partition_columns) #get query operator, or use equals if not 
supplied operator_class = BaseWhereOperator.get_operator(col_op or 'EQ') From a597fa5b2f0038f1cda54f8018afab1db7ad6b63 Mon Sep 17 00:00:00 2001 From: Russell Haering Date: Mon, 2 Dec 2013 10:23:55 -0800 Subject: [PATCH 0546/3726] verify SELECT statement generation with token() works now --- cqlengine/tests/query/test_queryoperators.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cqlengine/tests/query/test_queryoperators.py b/cqlengine/tests/query/test_queryoperators.py index 11dec4dded..b0f3eb571f 100644 --- a/cqlengine/tests/query/test_queryoperators.py +++ b/cqlengine/tests/query/test_queryoperators.py @@ -49,6 +49,9 @@ class TestModel(Model): where.set_context_id(1) self.assertEquals(str(where), 'token("p1", "p2") > token(:{}, :{})'.format(1, 2)) + # Verify that a SELECT query can be successfully generated + str(q._select_query()) + # Token(tuple()) is also possible for convenience # it (allows for Token(obj.pk) syntax) func = functions.Token(('a', 'b')) @@ -57,4 +60,5 @@ class TestModel(Model): where = q._where[0] where.set_context_id(1) self.assertEquals(str(where), 'token("p1", "p2") > token(:{}, :{})'.format(1, 2)) + str(q._select_query()) From f54c3f3613333d20fa03b9b61b96399e9485a992 Mon Sep 17 00:00:00 2001 From: Russell Haering Date: Mon, 2 Dec 2013 10:25:34 -0800 Subject: [PATCH 0547/3726] add tests to verify Token and pk__token validation --- cqlengine/tests/query/test_queryoperators.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/cqlengine/tests/query/test_queryoperators.py b/cqlengine/tests/query/test_queryoperators.py index b0f3eb571f..5988396a62 100644 --- a/cqlengine/tests/query/test_queryoperators.py +++ b/cqlengine/tests/query/test_queryoperators.py @@ -62,3 +62,13 @@ class TestModel(Model): self.assertEquals(str(where), 'token("p1", "p2") > token(:{}, :{})'.format(1, 2)) str(q._select_query()) + # The 'pk__token' virtual column may only be compared to a Token + self.assertRaises(query.QueryException, 
TestModel.objects.filter, pk__token__gt=10) + + # A Token may only be compared to the `pk__token' virtual column + func = functions.Token('a', 'b') + self.assertRaises(query.QueryException, TestModel.objects.filter, p1__gt=func) + + # The # of arguments to Token must match the # of partition keys + func = functions.Token('a') + self.assertRaises(query.QueryException, TestModel.objects.filter, pk__token__gt=func) From 5fa4ba4ead7dc2e2b7440a23fad1e1bc67c6557d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 5 Dec 2013 07:41:41 -0800 Subject: [PATCH 0548/3726] adding test around token values --- cqlengine/tests/query/test_queryoperators.py | 35 ++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/cqlengine/tests/query/test_queryoperators.py b/cqlengine/tests/query/test_queryoperators.py index 11dec4dded..69fb0a6d4b 100644 --- a/cqlengine/tests/query/test_queryoperators.py +++ b/cqlengine/tests/query/test_queryoperators.py @@ -7,6 +7,7 @@ from cqlengine import query from cqlengine.statements import WhereClause from cqlengine.operators import EqualsOperator +from cqlengine.management import sync_table, drop_table class TestQuerySetOperation(BaseCassEngTestCase): @@ -36,7 +37,41 @@ def test_mintimeuuid_function(self): where.update_context(ctx) self.assertEqual(ctx, {'5': DateTime().to_database(now)}) + +class TokenTestModel(Model): + key = columns.Integer(primary_key=True) + val = columns.Integer() + + +class TestTokenFunction(BaseCassEngTestCase): + + def setUp(self): + super(TestTokenFunction, self).setUp() + sync_table(TokenTestModel) + + def tearDown(self): + super(TestTokenFunction, self).tearDown() + drop_table(TokenTestModel) + def test_token_function(self): + """ Tests that token functions work properly """ + assert TokenTestModel.objects().count() == 0 + for i in range(10): + TokenTestModel.create(key=i, val=i) + assert TokenTestModel.objects().count() == 10 + seen_keys = set() + last_token = None + for instance in 
TokenTestModel.objects().limit(5): + last_token = instance.key + seen_keys.add(last_token) + assert len(seen_keys) == 5 + for instance in TokenTestModel.objects(pk__token__gt=functions.Token(last_token)): + seen_keys.add(instance.key) + + assert len(seen_keys) == 10 + assert all([i in seen_keys for i in range(10)]) + + def test_compound_pk_token_function(self): class TestModel(Model): p1 = columns.Text(partition_key=True) From 2951ba35de1fc7213121a9b45b7ff679d543c5d3 Mon Sep 17 00:00:00 2001 From: Kai Lautaportti Date: Thu, 28 Nov 2013 14:26:46 +0200 Subject: [PATCH 0549/3726] Use CL.ONE explicitly when interacting with the system keyspace The system keyspace uses "LocalStrategy" for replication so it needs to be accessed with CL.ONE. If the default consistency level is set to something else all management commands which rely on introspecting the system keyspace will fail. --- cqlengine/management.py | 16 ++++++++++------ .../tests/connections/test_connection_pool.py | 3 ++- cqlengine/tests/management/test_management.py | 3 ++- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 81771c48b9..b7cdf4d127 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -1,6 +1,7 @@ import json import warnings from cqlengine import SizeTieredCompactionStrategy, LeveledCompactionStrategy +from cqlengine import ONE from cqlengine.named import NamedTable from cqlengine.connection import connection_manager, execute @@ -28,7 +29,7 @@ def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, :param **replication_values: 1.2 only, additional values to ad to the replication data map """ with connection_manager() as con: - _, keyspaces = con.execute("""SELECT keyspace_name FROM system.schema_keyspaces""", {}) + _, keyspaces = con.execute("""SELECT keyspace_name FROM system.schema_keyspaces""", {}, ONE) if name not in [r[0] for r in keyspaces]: #try the 1.2 method replication_map = { @@ 
-55,7 +56,7 @@ def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, def delete_keyspace(name): with connection_manager() as con: - _, keyspaces = con.execute("""SELECT keyspace_name FROM system.schema_keyspaces""", {}) + _, keyspaces = con.execute("""SELECT keyspace_name FROM system.schema_keyspaces""", {}, ONE) if name in [r[0] for r in keyspaces]: execute("DROP KEYSPACE {}".format(name)) @@ -80,7 +81,8 @@ def sync_table(model, create_missing_keyspace=True): with connection_manager() as con: tables = con.execute( "SELECT columnfamily_name from system.schema_columnfamilies WHERE keyspace_name = :ks_name", - {'ks_name': ks_name} + {'ks_name': ks_name}, + ONE ) tables = [x[0] for x in tables.results] @@ -115,7 +117,8 @@ def sync_table(model, create_missing_keyspace=True): with connection_manager() as con: _, idx_names = con.execute( "SELECT index_name from system.\"IndexInfo\" WHERE table_name=:table_name", - {'table_name': raw_cf_name} + {'table_name': raw_cf_name}, + ONE ) idx_names = [i[0] for i in idx_names] @@ -230,7 +233,7 @@ def get_fields(model): logger.debug("get_fields %s %s", ks_name, col_family) - tmp = con.execute(query, {'ks_name':ks_name, 'col_family':col_family}) + tmp = con.execute(query, {'ks_name': ks_name, 'col_family': col_family}, ONE) return [Field(x[0], x[1]) for x in tmp.results] # convert to Field named tuples @@ -282,7 +285,8 @@ def drop_table(model): with connection_manager() as con: _, tables = con.execute( "SELECT columnfamily_name from system.schema_columnfamilies WHERE keyspace_name = :ks_name", - {'ks_name': ks_name} + {'ks_name': ks_name}, + ONE ) raw_cf_name = model.column_family_name(include_keyspace=False) if raw_cf_name not in [t[0] for t in tables]: diff --git a/cqlengine/tests/connections/test_connection_pool.py b/cqlengine/tests/connections/test_connection_pool.py index 29b3ea50a9..b34ea47182 100644 --- a/cqlengine/tests/connections/test_connection_pool.py +++ 
b/cqlengine/tests/connections/test_connection_pool.py @@ -2,6 +2,7 @@ from cql import OperationalError from mock import MagicMock, patch, Mock +from cqlengine import ONE from cqlengine.connection import ConnectionPool, Host @@ -18,4 +19,4 @@ def cursor(self): with patch.object(p, 'get', return_value=MockConnection()): with self.assertRaises(OperationalError): - p.execute("select * from system.peers", {}) + p.execute("select * from system.peers", {}, ONE) diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 88405b1e40..25f4d268b6 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -1,5 +1,6 @@ from mock import MagicMock, patch +from cqlengine import ONE from cqlengine.exceptions import CQLEngineException from cqlengine.management import create_table, delete_table, get_fields from cqlengine.tests.base import BaseCassEngTestCase @@ -22,7 +23,7 @@ def test_totally_dead_pool(self): with patch('cqlengine.connection.cql.connect') as mock: mock.side_effect=CQLEngineException with self.assertRaises(CQLEngineException): - self.pool.execute("select * from system.peers", {}) + self.pool.execute("select * from system.peers", {}, ONE) def test_dead_node(self): """ From 380eeeb8cabce3cd817ffa775d58c596b0e53c81 Mon Sep 17 00:00:00 2001 From: Kai Lautaportti Date: Tue, 10 Dec 2013 11:57:24 +0200 Subject: [PATCH 0550/3726] Make get_table_settings use CL.ONE explicitly --- cqlengine/management.py | 5 +++-- cqlengine/query.py | 10 +++++----- setup.py | 2 +- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index b7cdf4d127..6d41489e6f 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -239,8 +239,9 @@ def get_fields(model): def get_table_settings(model): - return schema_columnfamilies.get(keyspace_name=model._get_keyspace(), - 
columnfamily_name=model.column_family_name(include_keyspace=False)) + return schema_columnfamilies.objects.consistency(ONE).get( + keyspace_name=model._get_keyspace(), + columnfamily_name=model.column_family_name(include_keyspace=False)) def update_compaction(model): diff --git a/cqlengine/query.py b/cqlengine/query.py index 1b9ea24489..ed59e07700 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -318,6 +318,11 @@ def first(self): def all(self): return copy.deepcopy(self) + def consistency(self, consistency): + clone = copy.deepcopy(self) + clone._consistency = consistency + return clone + def _parse_filter_arg(self, arg): """ Parses a filter arg in the format: @@ -626,11 +631,6 @@ def values_list(self, *fields, **kwargs): clone._flat_values_list = flat return clone - def consistency(self, consistency): - clone = copy.deepcopy(self) - clone._consistency = consistency - return clone - def ttl(self, ttl): clone = copy.deepcopy(self) clone._ttl = ttl diff --git a/setup.py b/setup.py index 2965a6d73a..98700ad684 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ setup( name='cqlengine', - version=version, + version='0.9.2-2951ba35de1fc7213121a9b45b7ff679d543c5d3', description='Cassandra CQL 3 Object Mapper for Python', long_description=long_desc, classifiers = [ From 10c516138e6e3c4560889a1d87a4400954c63c2a Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 11 Dec 2013 10:41:46 -0800 Subject: [PATCH 0551/3726] execute_on_exception in batches --- cqlengine/query.py | 8 ++-- cqlengine/tests/query/test_batch_query.py | 49 +++++++++++++++++++++-- 2 files changed, 50 insertions(+), 7 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 1b9ea24489..33d0ae15e7 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -78,13 +78,15 @@ class BatchQuery(object): """ _consistency = None - def __init__(self, batch_type=None, timestamp=None, consistency=None): + + def __init__(self, batch_type=None, timestamp=None, consistency=None, 
execute_on_exception=False): self.queries = [] self.batch_type = batch_type if timestamp is not None and not isinstance(timestamp, datetime): raise CQLEngineException('timestamp object must be an instance of datetime') self.timestamp = timestamp self._consistency = consistency + self._execute_on_exception = execute_on_exception def add_query(self, query): if not isinstance(query, BaseCQLStatement): @@ -125,8 +127,8 @@ def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): - #don't execute if there was an exception - if exc_type is not None: return + #don't execute if there was an exception by default + if exc_type is not None and not self._execute_on_exception: return self.execute() diff --git a/cqlengine/tests/query/test_batch_query.py b/cqlengine/tests/query/test_batch_query.py index 37e8d28d56..94080729de 100644 --- a/cqlengine/tests/query/test_batch_query.py +++ b/cqlengine/tests/query/test_batch_query.py @@ -3,7 +3,7 @@ from uuid import uuid4 import random from cqlengine import Model, columns -from cqlengine.management import delete_table, create_table +from cqlengine.management import drop_table, sync_table from cqlengine.query import BatchQuery, DMLQuery from cqlengine.tests.base import BaseCassEngTestCase @@ -13,18 +13,23 @@ class TestMultiKeyModel(Model): count = columns.Integer(required=False) text = columns.Text(required=False) +class BatchQueryLogModel(Model): + # simple k/v table + k = columns.Integer(primary_key=True) + v = columns.Integer() + class BatchQueryTests(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(BatchQueryTests, cls).setUpClass() - delete_table(TestMultiKeyModel) - create_table(TestMultiKeyModel) + drop_table(TestMultiKeyModel) + sync_table(TestMultiKeyModel) @classmethod def tearDownClass(cls): super(BatchQueryTests, cls).tearDownClass() - delete_table(TestMultiKeyModel) + drop_table(TestMultiKeyModel) def setUp(self): super(BatchQueryTests, self).setUp() @@ -123,3 +128,39 @@ def 
test_dml_none_success_case(self): q.batch(None) assert q._batch is None + + def test_batch_execute_on_exception_succeeds(self): + # makes sure if execute_on_exception == True we still apply the batch + drop_table(BatchQueryLogModel) + sync_table(BatchQueryLogModel) + + obj = BatchQueryLogModel.objects(k=1) + self.assertEqual(0, len(obj)) + + try: + with BatchQuery(execute_on_exception=True) as b: + BatchQueryLogModel.batch(b).create(k=1, v=1) + raise Exception("Blah") + except: + pass + + obj = BatchQueryLogModel.objects(k=1) + self.assertEqual(1, len(obj)) + + def test_batch_execute_on_exception_skips_if_not_specified(self): + # makes sure if execute_on_exception == True we still apply the batch + drop_table(BatchQueryLogModel) + sync_table(BatchQueryLogModel) + + obj = BatchQueryLogModel.objects(k=2) + self.assertEqual(0, len(obj)) + + try: + with BatchQuery(execute_on_exception=True) as b: + BatchQueryLogModel.batch(b).create(k=2, v=2) + raise Exception("Blah") + except: + pass + + obj = BatchQueryLogModel.objects(k=2) + self.assertEqual(1, len(obj)) From 3d351e475d28f927b0c25f6806da38512d7de488 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 11 Dec 2013 14:59:39 -0800 Subject: [PATCH 0552/3726] fixed weird bug in tests which allowed them to pass but for the wrong reasons --- cqlengine/tests/query/test_batch_query.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/tests/query/test_batch_query.py b/cqlengine/tests/query/test_batch_query.py index 94080729de..f4a138509e 100644 --- a/cqlengine/tests/query/test_batch_query.py +++ b/cqlengine/tests/query/test_batch_query.py @@ -140,7 +140,7 @@ def test_batch_execute_on_exception_succeeds(self): try: with BatchQuery(execute_on_exception=True) as b: BatchQueryLogModel.batch(b).create(k=1, v=1) - raise Exception("Blah") + raise Exception("Blah") except: pass @@ -156,11 +156,11 @@ def test_batch_execute_on_exception_skips_if_not_specified(self): self.assertEqual(0, len(obj)) try: - 
with BatchQuery(execute_on_exception=True) as b: + with BatchQuery() as b: BatchQueryLogModel.batch(b).create(k=2, v=2) - raise Exception("Blah") + raise Exception("Blah") except: pass obj = BatchQueryLogModel.objects(k=2) - self.assertEqual(1, len(obj)) + self.assertEqual(0, len(obj)) From 70e55fd0e66016889461edf6ea26a3dbc733648c Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 11 Dec 2013 15:13:46 -0800 Subject: [PATCH 0553/3726] added inline explanation of why the 2 new batch exception tests should pass --- cqlengine/tests/query/test_batch_query.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cqlengine/tests/query/test_batch_query.py b/cqlengine/tests/query/test_batch_query.py index f4a138509e..1d7e1804db 100644 --- a/cqlengine/tests/query/test_batch_query.py +++ b/cqlengine/tests/query/test_batch_query.py @@ -145,6 +145,7 @@ def test_batch_execute_on_exception_succeeds(self): pass obj = BatchQueryLogModel.objects(k=1) + # should be 1 because the batch should execute self.assertEqual(1, len(obj)) def test_batch_execute_on_exception_skips_if_not_specified(self): @@ -163,4 +164,6 @@ def test_batch_execute_on_exception_skips_if_not_specified(self): pass obj = BatchQueryLogModel.objects(k=2) + + # should be 0 because the batch should not execute self.assertEqual(0, len(obj)) From 448d861c7bcb42a1c2e5026c38417c954d266b28 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 11 Dec 2013 15:25:06 -0800 Subject: [PATCH 0554/3726] updated batch docs --- docs/topics/queryset.rst | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index a73ed1d05f..4cf0d20488 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -312,6 +312,20 @@ Batch Queries ExampleModel.objects(id=some_id2).batch(b).delete() b.execute() + + Typically you will not want the block to execute if an exception occurs inside the `with` block. 
However, in the case that this is desirable, it's achievable by using the following syntax: + + .. code-block:: python + + with BatchQuery(execute_on_exception=True) as b: + LogEntry.batch(b).create(k=1, v=1) + mystery_function() # exception thrown in here + LogEntry.batch(b).create(k=1, v=2) # this code is never reached due to the exception, but anything leading up to here will execute in the batch. + + If an exception is thrown somewhere in the block, any statements that have been added to the batch will still be executed. This is useful for some logging situations. + + + QuerySet method reference ========================= From a45d867b4f7010d190afda555ce45ec283a10a7c Mon Sep 17 00:00:00 2001 From: Konstantin Podshumok Date: Thu, 12 Dec 2013 11:06:29 +0300 Subject: [PATCH 0555/3726] log exception on _create_connection failure --- cqlengine/connection.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 6e08a3580f..cc352a6549 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -197,9 +197,9 @@ def _create_connection(self): ) new_conn.set_cql_version('3.0.0') return new_conn - except Exception as e: - logging.debug("Could not establish connection to {}:{}".format(host.name, host.port)) - pass + except Exception as exc: + logging.debug("Could not establish connection to" + " {}:{} ({!r})".format(host.name, host.port, exc)) raise CQLConnectionError("Could not connect to any server in cluster") From 57741b1f7df2cdb58cb2b181c06736ecf562bf75 Mon Sep 17 00:00:00 2001 From: Roey Berman Date: Thu, 12 Dec 2013 12:11:45 +0200 Subject: [PATCH 0556/3726] fix ttl in update statement --- cqlengine/statements.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index b3b95656a2..8ba5741756 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -582,15 +582,16 @@ class 
UpdateStatement(AssignmentStatement): def __unicode__(self): qs = ['UPDATE', self.table] + + if self.ttl: + qs += ["USING TTL {}".format(self.ttl)] + qs += ['SET'] qs += [', '.join([unicode(c) for c in self.assignments])] if self.where_clauses: qs += [self._where] - if self.ttl: - qs += ["USING TTL {}".format(self.ttl)] - return ' '.join(qs) From 29912bb944cec7ac48e44cd8597f354b2276d8b2 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 12 Dec 2013 10:53:04 -0800 Subject: [PATCH 0557/3726] test verifying the TTL syntax is valid --- cqlengine/tests/test_ttl.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cqlengine/tests/test_ttl.py b/cqlengine/tests/test_ttl.py index 716caface8..0d465ac09a 100644 --- a/cqlengine/tests/test_ttl.py +++ b/cqlengine/tests/test_ttl.py @@ -63,6 +63,12 @@ def test_update_includes_ttl(self): query = m.call_args[0][0] self.assertIn("USING TTL", query) + def test_update_syntax_valid(self): + # sanity test that ensures the TTL syntax is accepted by cassandra + model = TestTTLModel.create(text="goodbye blake") + model.ttl(60).update(text="goodbye forever") + + From 2b8a811bd77c6a79e8c4cf6529fc67731bd0b26b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 12 Dec 2013 10:53:04 -0800 Subject: [PATCH 0558/3726] test verifying the TTL syntax is valid --- cqlengine/tests/test_ttl.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cqlengine/tests/test_ttl.py b/cqlengine/tests/test_ttl.py index 716caface8..0d465ac09a 100644 --- a/cqlengine/tests/test_ttl.py +++ b/cqlengine/tests/test_ttl.py @@ -63,6 +63,12 @@ def test_update_includes_ttl(self): query = m.call_args[0][0] self.assertIn("USING TTL", query) + def test_update_syntax_valid(self): + # sanity test that ensures the TTL syntax is accepted by cassandra + model = TestTTLModel.create(text="goodbye blake") + model.ttl(60).update(text="goodbye forever") + + From 25bd8af1d35cb3dc9c6d1c030592eef5714b7cf1 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 12 Dec 2013 13:46:52 
-0800 Subject: [PATCH 0559/3726] fixed issue with setting a list to an empty list --- cqlengine/statements.py | 7 ++-- .../tests/columns/test_container_columns.py | 39 ++++++++++++++----- 2 files changed, 33 insertions(+), 13 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 8ba5741756..45f3fabead 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -218,7 +218,7 @@ def __unicode__(self): if not self._analyzed: self._analyze() qs = [] ctx_id = self.context_id - if self._assignments: + if self._assignments is not None: qs += ['"{}" = :{}'.format(self.field, ctx_id)] ctx_id += 1 @@ -233,12 +233,12 @@ def __unicode__(self): def get_context_size(self): if not self._analyzed: self._analyze() - return int(bool(self._assignments)) + int(bool(self._append)) + int(bool(self._prepend)) + return int(self._assignments is not None) + int(bool(self._append)) + int(bool(self._prepend)) def update_context(self, ctx): if not self._analyzed: self._analyze() ctx_id = self.context_id - if self._assignments: + if self._assignments is not None: ctx[str(ctx_id)] = self._to_database(self._assignments) ctx_id += 1 if self._prepend: @@ -263,6 +263,7 @@ def _analyze(self): # rewrite the whole list self._assignments = self.value + elif len(self.previous) == 0: # if we're updating from an empty # list, do a complete insert diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 1cb19f81f3..2a3707d03b 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -4,7 +4,7 @@ from cqlengine import Model, ValidationError from cqlengine import columns -from cqlengine.management import create_table, delete_table +from cqlengine.management import sync_table, drop_table from cqlengine.tests.base import BaseCassEngTestCase @@ -34,13 +34,13 @@ class TestSetColumn(BaseCassEngTestCase): @classmethod def setUpClass(cls): 
super(TestSetColumn, cls).setUpClass() - delete_table(TestSetModel) - create_table(TestSetModel) + drop_table(TestSetModel) + sync_table(TestSetModel) @classmethod def tearDownClass(cls): super(TestSetColumn, cls).tearDownClass() - delete_table(TestSetModel) + drop_table(TestSetModel) def test_empty_set_initial(self): """ @@ -187,13 +187,13 @@ class TestListColumn(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(TestListColumn, cls).setUpClass() - delete_table(TestListModel) - create_table(TestListModel) + drop_table(TestListModel) + sync_table(TestListModel) @classmethod def tearDownClass(cls): super(TestListColumn, cls).tearDownClass() - delete_table(TestListModel) + drop_table(TestListModel) def test_initial(self): tmp = TestListModel.create() @@ -315,6 +315,24 @@ def test_default_empty_container_saving(self): m = TestListModel.get(partition=pkey) self.assertEqual(m.int_list, [1,2,3,4]) + def test_remove_entry_works(self): + pkey = uuid4() + tmp = TestListModel.create(partition=pkey, int_list=[1,2]) + tmp.int_list.pop() + tmp.update() + tmp = TestListModel.get(partition=pkey) + self.assertEqual(tmp.int_list, [1]) + + def test_update_from_non_empty_to_empty(self): + pkey = uuid4() + tmp = TestListModel.create(partition=pkey, int_list=[1,2]) + tmp.int_list = [] + tmp.update() + + tmp = TestListModel.get(partition=pkey) + self.assertEqual(tmp.int_list, []) + + class TestMapModel(Model): partition = columns.UUID(primary_key=True, default=uuid4) @@ -326,13 +344,13 @@ class TestMapColumn(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(TestMapColumn, cls).setUpClass() - delete_table(TestMapModel) - create_table(TestMapModel) + drop_table(TestMapModel) + sync_table(TestMapModel) @classmethod def tearDownClass(cls): super(TestMapColumn, cls).tearDownClass() - delete_table(TestMapModel) + drop_table(TestMapModel) def test_empty_default(self): tmp = TestMapModel.create() @@ -343,6 +361,7 @@ def test_empty_retrieve(self): tmp2 = 
TestMapModel.get(partition=tmp.partition) tmp2.int_map['blah'] = 1 + def test_remove_last_entry_works(self): tmp = TestMapModel.create() tmp.text_map["blah"] = datetime.now() From 5c425e819333a89cfd1f31620b691d60017fedda Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 12 Dec 2013 15:19:53 -0800 Subject: [PATCH 0560/3726] disallow saving None inside set and list --- cqlengine/columns.py | 5 +++++ cqlengine/tests/columns/test_container_columns.py | 11 +++++++++++ 2 files changed, 16 insertions(+) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index ef47f284ef..11a9f60dcf 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -574,6 +574,9 @@ def validate(self, value): else: raise ValidationError('{} cannot be coerced to a set object'.format(val)) + if None in val: + raise ValidationError("None not allowed in a set") + return {self.value_col.validate(v) for v in val} def to_python(self, value): @@ -655,6 +658,8 @@ def validate(self, value): if val is None: return if not isinstance(val, (set, list, tuple)): raise ValidationError('{} is not a list object'.format(val)) + if None in val: + raise ValidationError("None is not allowed in a list") return [self.value_col.validate(v) for v in val] def to_python(self, value): diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 2a3707d03b..da26582322 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -42,6 +42,10 @@ def tearDownClass(cls): super(TestSetColumn, cls).tearDownClass() drop_table(TestSetModel) + def test_add_none_fails(self): + with self.assertRaises(ValidationError): + m = TestSetModel.create(int_set=set([None])) + def test_empty_set_initial(self): """ tests that sets are set() by default, should never be none @@ -332,6 +336,13 @@ def test_update_from_non_empty_to_empty(self): tmp = TestListModel.get(partition=pkey) self.assertEqual(tmp.int_list, []) + def 
test_insert_none(self): + pkey = uuid4() + with self.assertRaises(ValidationError): + TestListModel.create(partition=pkey, int_list=[None]) + + + class TestMapModel(Model): From daa4772e45cd844107ef41f151495cd0aa4d8ea4 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 12 Dec 2013 15:23:00 -0800 Subject: [PATCH 0561/3726] Fixed #90, putting None inside containers now throws a validation error --- cqlengine/tests/columns/test_container_columns.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index da26582322..d54a339f06 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -367,6 +367,15 @@ def test_empty_default(self): tmp = TestMapModel.create() tmp.int_map['blah'] = 1 + def test_add_none_as_map_key(self): + with self.assertRaises(ValidationError): + TestMapModel.create(int_map={None:1}) + + def test_add_none_as_map_value(self): + with self.assertRaises(ValidationError): + TestMapModel.create(int_map={None:1}) + + def test_empty_retrieve(self): tmp = TestMapModel.create() tmp2 = TestMapModel.get(partition=tmp.partition) From 1791544946a3465a5f742ae5841d1b1f73954003 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 12 Dec 2013 16:00:39 -0800 Subject: [PATCH 0562/3726] ensure we aren't trying to include polymorphic keys when we blah.objects.delete --- cqlengine/tests/model/test_polymorphism.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/cqlengine/tests/model/test_polymorphism.py b/cqlengine/tests/model/test_polymorphism.py index 4528833517..f950bbcdf8 100644 --- a/cqlengine/tests/model/test_polymorphism.py +++ b/cqlengine/tests/model/test_polymorphism.py @@ -1,7 +1,9 @@ import uuid +import mock from cqlengine import columns from cqlengine import models +from cqlengine.connection import ConnectionPool from cqlengine.tests.base import BaseCassEngTestCase from 
cqlengine import management @@ -118,6 +120,21 @@ def test_query_deserialization(self): assert isinstance(p1r, Poly1) assert isinstance(p2r, Poly2) + def test_delete_on_polymorphic_subclass_does_not_include_polymorphic_key(self): + p1 = Poly1.create() + + with mock.patch.object(ConnectionPool, 'execute') as m: + Poly1.objects(partition=p1.partition).delete() + + # make sure our polymorphic key isn't in the CQL + # not sure how we would even get here if it was in there + # since the CQL would fail. + + self.assertNotIn("row_type", m.call_args[0][0]) + + + + class UnindexedPolyBase(models.Model): partition = columns.UUID(primary_key=True, default=uuid.uuid4) From adcfbb9b13f435841b215ac8e87a59fd11c74217 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 12 Dec 2013 16:56:30 -0800 Subject: [PATCH 0563/3726] version bump --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 2003b639c4..78bc1abd14 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.9.2 +0.10.0 From 4e8244022bcf0d3e3012ba8ea9a514a19a66f457 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 12 Dec 2013 17:02:11 -0800 Subject: [PATCH 0564/3726] fixed version --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 98700ad684..2965a6d73a 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ setup( name='cqlengine', - version='0.9.2-2951ba35de1fc7213121a9b45b7ff679d543c5d3', + version=version, description='Cassandra CQL 3 Object Mapper for Python', long_description=long_desc, classifiers = [ From 5165bb078c3fbd51dbe112e25a7c5c7974800397 Mon Sep 17 00:00:00 2001 From: Kai Lautaportti Date: Sat, 14 Dec 2013 20:23:21 +0200 Subject: [PATCH 0565/3726] Fix logic determining if compaction options need syncing The compaction_strategy_options as received from the `system.schema_columnfamilies` table are all string valued and comparison to typed values (ints, floats) as 
defined in models results in sync_table() issuning ALTER TABLE statements redundantly even if there were no changes to compaction options. This patch casts the values defined in models to strings to ensure they compare equal as needed. Additionally, this patch normalizes handling of the `min_threshold` and `max_threshold` options for SizeTieredCompactionStrategy which exist outside the `compaction_strategy_options` mapping and have different names in the `system.schema_columnfamilies` table compared to the CQL options. A typo in the `BaseModel.__compaction_tombstone_threshold__` option is also fixed. --- cqlengine/management.py | 29 +++++++++++--- cqlengine/models.py | 2 +- .../management/test_compaction_settings.py | 40 ++++++++++++++++++- 3 files changed, 63 insertions(+), 8 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 6d41489e6f..04c23992da 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -207,9 +207,12 @@ def setter(key, limited_to_strategy = None): raise CQLEngineException("{} is limited to {}".format(key, limited_to_strategy)) if tmp: - result[key] = tmp + # Explicitly cast the values to strings to be able to compare the + # values against introspected values from Cassandra. + result[key] = str(tmp) setter('tombstone_compaction_interval') + setter('tombstone_threshold') setter('bucket_high', SizeTieredCompactionStrategy) setter('bucket_low', SizeTieredCompactionStrategy) @@ -217,7 +220,7 @@ def setter(key, limited_to_strategy = None): setter('min_threshold', SizeTieredCompactionStrategy) setter('min_sstable_size', SizeTieredCompactionStrategy) - setter("sstable_size_in_mb", LeveledCompactionStrategy) + setter('sstable_size_in_mb', LeveledCompactionStrategy) return result @@ -245,6 +248,14 @@ def get_table_settings(model): def update_compaction(model): + """Updates the compaction options for the given model if necessary. + + :param model: The model to update. 
+ + :return: `True`, if the compaction options were modified in Cassandra, + `False` otherwise. + :rtype: bool + """ logger.debug("Checking %s for compaction differences", model) row = get_table_settings(model) # check compaction_strategy_class @@ -253,13 +264,17 @@ def update_compaction(model): do_update = not row['compaction_strategy_class'].endswith(model.__compaction__) - existing_options = row['compaction_strategy_options'] - existing_options = json.loads(existing_options) + existing_options = json.loads(row['compaction_strategy_options']) + # The min/max thresholds are stored differently in the system data dictionary + existing_options.update({ + 'min_threshold': str(row['min_compaction_threshold']), + 'max_threshold': str(row['max_compaction_threshold']), + }) desired_options = get_compaction_options(model) desired_options.pop('class', None) - for k,v in desired_options.items(): + for k, v in desired_options.items(): val = existing_options.pop(k, None) if val != v: do_update = True @@ -273,12 +288,16 @@ def update_compaction(model): query = "ALTER TABLE {} with compaction = {}".format(cf_name, options) logger.debug(query) execute(query) + return True + + return False def delete_table(model): warnings.warn("delete_table has been deprecated in favor of drop_table()", DeprecationWarning) return drop_table(model) + def drop_table(model): # don't try to delete non existant tables diff --git a/cqlengine/models.py b/cqlengine/models.py index 04854c020a..252984a6bb 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -214,7 +214,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass # compaction options __compaction__ = None __compaction_tombstone_compaction_interval__ = None - __compaction_tombstone_threshold = None + __compaction_tombstone_threshold__ = None # compaction - size tiered options __compaction_bucket_high__ = None diff --git a/cqlengine/tests/management/test_compaction_settings.py 
b/cqlengine/tests/management/test_compaction_settings.py index 014abcb2db..e2147e6ccd 100644 --- a/cqlengine/tests/management/test_compaction_settings.py +++ b/cqlengine/tests/management/test_compaction_settings.py @@ -39,7 +39,7 @@ def test_size_tiered(self): def test_min_threshold(self): self.model.__compaction_min_threshold__ = 2 result = get_compaction_options(self.model) - assert result['min_threshold'] == 2 + assert result['min_threshold'] == '2' class LeveledCompactionTest(BaseCompactionTest): @@ -69,7 +69,7 @@ def test_sstable_size_in_mb(self): with patch.object(self.model, '__compaction_sstable_size_in_mb__', 32): result = get_compaction_options(self.model) - assert result['sstable_size_in_mb'] == 32 + assert result['sstable_size_in_mb'] == '32' class LeveledcompactionTestTable(Model): @@ -90,6 +90,42 @@ def test_alter_is_called_table(self): sync_table(LeveledcompactionTestTable) assert mock.called == 1 + def test_compaction_not_altered_without_changes_leveled(self): + from cqlengine.management import update_compaction + + class LeveledCompactionChangesDetectionTest(Model): + __compaction__ = LeveledCompactionStrategy + __compaction_sstable_size_in_mb__ = 160 + __compaction_tombstone_threshold__ = 0.125 + __compaction_tombstone_compaction_interval__ = 3600 + + pk = columns.Integer(primary_key=True) + + drop_table(LeveledCompactionChangesDetectionTest) + sync_table(LeveledCompactionChangesDetectionTest) + + assert not update_compaction(LeveledCompactionChangesDetectionTest) + + def test_compaction_not_altered_without_changes_sizetiered(self): + from cqlengine.management import update_compaction + + class SizeTieredCompactionChangesDetectionTest(Model): + __compaction__ = SizeTieredCompactionStrategy + __compaction_bucket_high__ = 20 + __compaction_bucket_low__ = 10 + __compaction_max_threshold__ = 200 + __compaction_min_threshold__ = 100 + __compaction_min_sstable_size__ = 1000 + __compaction_tombstone_threshold__ = 0.125 + 
__compaction_tombstone_compaction_interval__ = 3600 + + pk = columns.Integer(primary_key=True) + + drop_table(SizeTieredCompactionChangesDetectionTest) + sync_table(SizeTieredCompactionChangesDetectionTest) + + assert not update_compaction(SizeTieredCompactionChangesDetectionTest) + def test_alter_actually_alters(self): tmp = copy.deepcopy(LeveledcompactionTestTable) drop_table(tmp) From 2810fead112e32f9082aeafee92c10dec441b296 Mon Sep 17 00:00:00 2001 From: Roey Berman Date: Mon, 16 Dec 2013 15:04:57 +0200 Subject: [PATCH 0566/3726] bugfix: connection.setup - cast port to int --- cqlengine/connection.py | 9 +++++++-- .../connections/test_connection_setup.py | 20 +++++++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) create mode 100644 cqlengine/tests/connections/test_connection_setup.py diff --git a/cqlengine/connection.py b/cqlengine/connection.py index cc352a6549..01b94023fa 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -85,12 +85,17 @@ def setup( host = host.strip() host = host.split(':') if len(host) == 1: - _hosts.append(Host(host[0], 9160)) + port = 9160 elif len(host) == 2: - _hosts.append(Host(*host)) + try: + port = int(host[1]) + except ValueError: + raise CQLConnectionError("Can't parse {}".format(''.join(host))) else: raise CQLConnectionError("Can't parse {}".format(''.join(host))) + _hosts.append(Host(host[0], port)) + if not _hosts: raise CQLConnectionError("At least one host required") diff --git a/cqlengine/tests/connections/test_connection_setup.py b/cqlengine/tests/connections/test_connection_setup.py new file mode 100644 index 0000000000..5f1f7b774c --- /dev/null +++ b/cqlengine/tests/connections/test_connection_setup.py @@ -0,0 +1,20 @@ +from unittest import TestCase +from mock import MagicMock, patch, Mock + +from cqlengine.connection import setup, CQLConnectionError, Host + + +class OperationalErrorLoggingTest(TestCase): + @patch('cqlengine.connection.ConnectionPool', return_value=None, autospec=True) + 
def test_setup_hosts(self, PatchedConnectionPool): + with self.assertRaises(CQLConnectionError): + setup(hosts=['localhost:abcd']) + self.assertEqual(len(PatchedConnectionPool.mock_calls), 0) + + with self.assertRaises(CQLConnectionError): + setup(hosts=['localhost:9160:abcd']) + self.assertEqual(len(PatchedConnectionPool.mock_calls), 0) + + setup(hosts=['localhost:9161', 'remotehost']) + self.assertEqual(len(PatchedConnectionPool.mock_calls), 1) + self.assertEqual(PatchedConnectionPool.call_args[0][0], [Host('localhost', 9161), Host('remotehost', 9160)]) From d2786f8f791c35fc4a0e3cb9e6cad61a7d7fa137 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Dec 2013 12:19:44 -0800 Subject: [PATCH 0567/3726] throwing timestamp options everywhere --- cqlengine/models.py | 30 +++++++++++++++++++++++++++--- cqlengine/query.py | 3 ++- cqlengine/statements.py | 11 +++++++++-- 3 files changed, 38 insertions(+), 6 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 04854c020a..28a5a0fad7 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -93,6 +93,28 @@ def ttl_setter(ts): def __call__(self, *args, **kwargs): raise NotImplementedError +class TimestampDescriptor(object): + """ + returns a query set descriptor with a timestamp specified + """ + def __get__(self, instance, model): + if instance: + # instance method + def timestamp_setter(ts): + instance._timestamp = ts + return instance + return timestamp_setter + + qs = model.__queryset__(model) + + def timestamp_setter(ts): + qs._timestamp = ts + return qs + + return timestamp_setter + + def __call__(self, *args, **kwargs): + raise NotImplementedError class ConsistencyDescriptor(object): """ @@ -200,6 +222,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass objects = QuerySetDescriptor() ttl = TTLDescriptor() consistency = ConsistencyDescriptor() + timestamp = TimestampDescriptor() #table names will be generated automatically from it's model and package name #however, you can 
also define them manually here @@ -231,8 +254,9 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass __queryset__ = ModelQuerySet __dmlquery__ = DMLQuery - __ttl__ = None + #__ttl__ = None # this doesn't seem to be used __consistency__ = None # can be set per query + __timestamp__ = None # optional timestamp to include with the operation __read_repair_chance__ = 0.1 @@ -257,11 +281,11 @@ def __repr__(self): """ Pretty printing of models by their primary key """ - return '{} <{}>'.format(self.__class__.__name__, + return '{} <{}>'.format(self.__class__.__name__, ', '.join(('{}={}'.format(k, getattr(self, k)) for k,v in self._primary_keys.iteritems())) ) - + @classmethod def _discover_polymorphic_submodels(cls): diff --git a/cqlengine/query.py b/cqlengine/query.py index 78d273c03e..24a3374455 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -684,13 +684,14 @@ class DMLQuery(object): _ttl = None _consistency = None - def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None): + def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None): self.model = model self.column_family_name = self.model.column_family_name() self.instance = instance self._batch = batch self._ttl = ttl self._consistency = consistency + self._timestamp = timestamp def _execute(self, q): if self._batch: diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 45f3fabead..2afeae0156 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -400,12 +400,13 @@ def __unicode__(self): class BaseCQLStatement(object): """ The base cql statement class """ - def __init__(self, table, consistency=None, where=None): + def __init__(self, table, consistency=None, timestamp=None, where=None): super(BaseCQLStatement, self).__init__() self.table = table self.consistency = consistency self.context_id = 0 self.context_counter = self.context_id + self.timestamp = timestamp self.where_clauses = [] for clause in 
where or []: @@ -513,7 +514,8 @@ def __init__(self, assignments=None, consistency=None, where=None, - ttl=None): + ttl=None, + timestamp=None): super(AssignmentStatement, self).__init__( table, consistency=consistency, @@ -575,6 +577,9 @@ def __unicode__(self): if self.ttl: qs += ["USING TTL {}".format(self.ttl)] + if self.timestamp: + qs += ["USING TIMESTAMP {}".format(self.timestamp)] + return ' '.join(qs) @@ -586,6 +591,8 @@ def __unicode__(self): if self.ttl: qs += ["USING TTL {}".format(self.ttl)] + if self.timestamp: + qs += ["USING TIMESTAMP {}".format(self.timestamp)] qs += ['SET'] qs += [', '.join([unicode(c) for c in self.assignments])] From 81131ac2af2ab767cbe818f4f41eb7cda34a6423 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Dec 2013 16:00:44 -0800 Subject: [PATCH 0568/3726] added sure library, basic test setup --- cqlengine/statements.py | 22 +++++++++++++++++++++ cqlengine/tests/test_timestamp.py | 32 +++++++++++++++++++++++++++++++ requirements.txt | 1 + 3 files changed, 55 insertions(+) create mode 100644 cqlengine/tests/test_timestamp.py diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 2afeae0156..815ccfe34f 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -1,3 +1,4 @@ +from datetime import datetime, timedelta from cqlengine.functions import QueryValue from cqlengine.operators import BaseWhereOperator, InOperator @@ -444,6 +445,20 @@ def update_context_id(self, i): clause.set_context_id(self.context_counter) self.context_counter += clause.get_context_size() + @property + def timestamp_normalized(self): + if isinstance(self.timestamp, int): + return self.timestamp + + if isinstance(self.timestamp, datetime): + # do stuff + return self.timestamp + + if isinstance(self.timestamp, timedelta): + # do more stuff + return self.timestamp + + def __unicode__(self): raise NotImplementedError @@ -645,6 +660,13 @@ def __unicode__(self): qs += [', '.join(['{}'.format(f) for f in self.fields])] qs += ['FROM', 
self.table] + delete_option = [] + if self.timestamp: + delete_option += ["TIMESTAMP {}".format(self.timestamp)] + + if delete_option: + qs += ["USING {}".format(" AND ".join(delete_option))] + if self.where_clauses: qs += [self._where] diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py new file mode 100644 index 0000000000..f1e7c91ac3 --- /dev/null +++ b/cqlengine/tests/test_timestamp.py @@ -0,0 +1,32 @@ +""" +Tests surrounding the blah.timestamp( timedelta(seconds=30) ) format. +""" +from datetime import timedelta + +import unittest +from uuid import uuid4 +import sure +from cqlengine import Model, columns +from cqlengine.management import sync_table +from cqlengine.tests.base import BaseCassEngTestCase + + +class TestTimestampModel(Model): + id = columns.UUID(primary_key=True, default=lambda:uuid4()) + count = columns.Integer() + + +class CreateWithTimestampTest(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(CreateWithTimestampTest, cls).setUpClass() + sync_table(TestTimestampModel) + + def test_batch(self): + pass + + def test_non_batch(self): + tmp = TestTimestampModel.timestamp(timedelta(seconds=30)).create(count=1) + tmp.should.be.ok + diff --git a/requirements.txt b/requirements.txt index 04a6bc4465..1fd68cdd9c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,3 +3,4 @@ ipython==0.13.1 ipdb==0.7 Sphinx==1.1.3 mock==1.0.1 +sure From 9640b1c02e99e3ee045a9416e3aba8866d1452a6 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Dec 2013 16:15:33 -0800 Subject: [PATCH 0569/3726] better class structure for the billions of tests i'm about to write --- cqlengine/models.py | 10 ++++++---- cqlengine/query.py | 5 ++++- cqlengine/tests/test_timestamp.py | 22 ++++++++++++++++++---- 3 files changed, 28 insertions(+), 9 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 28a5a0fad7..7b254c9900 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -222,16 +222,18 @@ class 
MultipleObjectsReturned(_MultipleObjectsReturned): pass objects = QuerySetDescriptor() ttl = TTLDescriptor() consistency = ConsistencyDescriptor() + + # custom timestamps, see USING TIMESTAMP X timestamp = TimestampDescriptor() - #table names will be generated automatically from it's model and package name - #however, you can also define them manually here + # table names will be generated automatically from it's model + # however, you can also define them manually here __table_name__ = None - #the keyspace for this model + # the keyspace for this model __keyspace__ = None - #polymorphism options + # polymorphism options __polymorphic_key__ = None # compaction options diff --git a/cqlengine/query.py b/cqlengine/query.py index 24a3374455..fea1ec196c 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -166,6 +166,7 @@ def __init__(self, model): self._batch = None self._ttl = None self._consistency = None + self._timestamp = None @property def column_family_name(self): @@ -510,7 +511,9 @@ def defer(self, fields): return self._only_or_defer('defer', fields) def create(self, **kwargs): - return self.model(**kwargs).batch(self._batch).ttl(self._ttl).consistency(self._consistency).save() + return self.model(**kwargs).batch(self._batch).ttl(self._ttl).\ + consistency(self._consistency).\ + timestamp(self._timestamp).save() def delete(self): """ diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index f1e7c91ac3..845b97b9aa 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -5,8 +5,10 @@ import unittest from uuid import uuid4 +import mock import sure from cqlengine import Model, columns +from cqlengine.connection import ConnectionPool from cqlengine.management import sync_table from cqlengine.tests.base import BaseCassEngTestCase @@ -16,17 +18,29 @@ class TestTimestampModel(Model): count = columns.Integer() -class CreateWithTimestampTest(BaseCassEngTestCase): - +class 
BaseTimestampTest(BaseCassEngTestCase): @classmethod def setUpClass(cls): - super(CreateWithTimestampTest, cls).setUpClass() + super(BaseTimestampTest, cls).setUpClass() sync_table(TestTimestampModel) + +class CreateWithTimestampTest(BaseTimestampTest): + def test_batch(self): pass - def test_non_batch(self): + def test_non_batch_syntax_integration(self): tmp = TestTimestampModel.timestamp(timedelta(seconds=30)).create(count=1) tmp.should.be.ok + def test_non_batch_syntax_unit(self): + + with mock.patch.object(ConnectionPool, "execute") as m: + TestTimestampModel.timestamp(timedelta(seconds=30)).create(count=1) + + query = m.call_args[0][0] + + "USING TIMESTAMP".should.be.within(query) + + From a4c439af10439bea496fc52cf1dadad9de0e713d Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Dec 2013 16:18:29 -0800 Subject: [PATCH 0570/3726] ensure we're only operating on a copy of the instance --- cqlengine/models.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 7b254c9900..869a02012b 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -1,6 +1,6 @@ from collections import OrderedDict import re - +import copy from cqlengine import columns from cqlengine.exceptions import ModelException, CQLEngineException, ValidationError from cqlengine.query import ModelQuerySet, DMLQuery, AbstractQueryableColumn @@ -76,6 +76,7 @@ class TTLDescriptor(object): """ def __get__(self, instance, model): if instance: + instance = copy.deepcopy(instance) # instance method def ttl_setter(ts): instance._ttl = ts @@ -100,6 +101,7 @@ class TimestampDescriptor(object): def __get__(self, instance, model): if instance: # instance method + instance = copy.deepcopy(instance) def timestamp_setter(ts): instance._timestamp = ts return instance @@ -122,6 +124,7 @@ class ConsistencyDescriptor(object): """ def __get__(self, instance, model): if instance: + instance = copy.deepcopy(instance) def 
consistency_setter(consistency): instance.__consistency__ = consistency return instance From 74e55366d5ac1bc7ee27316da35fdffba4edaeed Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Dec 2013 16:29:41 -0800 Subject: [PATCH 0571/3726] tracking down issues with missing timestamp values --- cqlengine/models.py | 4 +++- cqlengine/query.py | 3 ++- cqlengine/tests/test_timestamp.py | 5 +++++ 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 869a02012b..a78678e5e2 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -261,7 +261,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass #__ttl__ = None # this doesn't seem to be used __consistency__ = None # can be set per query - __timestamp__ = None # optional timestamp to include with the operation + __timestamp__ = None # optional timestamp to include with the operation (USING TIMESTAMP) __read_repair_chance__ = 0.1 @@ -464,6 +464,7 @@ def save(self): self.__dmlquery__(self.__class__, self, batch=self._batch, ttl=self._ttl, + timestamp=self._timestamp, consistency=self.__consistency__).save() #reset the value managers @@ -498,6 +499,7 @@ def update(self, **values): self.__dmlquery__(self.__class__, self, batch=self._batch, ttl=self._ttl, + timestamp=self.__timestamp__, consistency=self.__consistency__).update() #reset the value managers diff --git a/cqlengine/query.py b/cqlengine/query.py index fea1ec196c..9de5a745f8 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -686,6 +686,7 @@ class DMLQuery(object): """ _ttl = None _consistency = None + _timestamp = None def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None): self.model = model @@ -804,7 +805,7 @@ def save(self): if self.instance._has_counter or self.instance._can_update(): return self.update() else: - insert = InsertStatement(self.column_family_name, ttl=self._ttl) + insert = InsertStatement(self.column_family_name, 
ttl=self._ttl, timestamp=self._timestamp) for name, col in self.instance._columns.items(): val = getattr(self.instance, name, None) if col._val_is_null(val): diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index 845b97b9aa..c58952c183 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -30,6 +30,11 @@ class CreateWithTimestampTest(BaseTimestampTest): def test_batch(self): pass + def test_timestamp_is_set_on_model_queryset(self): + delta = timedelta(seconds=30) + tmp = TestTimestampModel.timestamp(delta) + tmp._timestamp.should.equal(delta) + def test_non_batch_syntax_integration(self): tmp = TestTimestampModel.timestamp(timedelta(seconds=30)).create(count=1) tmp.should.be.ok From 67ccf4ce69eb53ef1d350d17141c0ee6ba21de8f Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Dec 2013 16:30:42 -0800 Subject: [PATCH 0572/3726] ensuring we store the timestamp on the insert statement --- cqlengine/statements.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 815ccfe34f..da1bcb38ae 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -621,11 +621,12 @@ def __unicode__(self): class DeleteStatement(BaseCQLStatement): """ a cql delete statement """ - def __init__(self, table, fields=None, consistency=None, where=None): + def __init__(self, table, fields=None, consistency=None, where=None, timestamp=None): super(DeleteStatement, self).__init__( table, consistency=consistency, where=where, + timestamp=timestamp ) self.fields = [] if isinstance(fields, basestring): From 3ecad5814a8d14b7886f5f29b8d93d6a434f0bbe Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Dec 2013 16:32:50 -0800 Subject: [PATCH 0573/3726] assignment statement saves the timestamp --- cqlengine/statements.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index da1bcb38ae..d8d9121e8e 
100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -537,6 +537,7 @@ def __init__(self, where=where, ) self.ttl = ttl + self.timestamp = timestamp # add assignments self.assignments = [] From 826b5b78660e472e91cfe848b51e677c1d0e4ccb Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Dec 2013 16:46:13 -0800 Subject: [PATCH 0574/3726] standard insert seems to work with custom timestamp --- cqlengine/statements.py | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index d8d9121e8e..1ffc479058 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -447,16 +447,23 @@ def update_context_id(self, i): @property def timestamp_normalized(self): - if isinstance(self.timestamp, int): - return self.timestamp - - if isinstance(self.timestamp, datetime): - # do stuff + """ + we're expecting self.timestamp to be either a long, a datetime, or a timedelta + :return: + """ + if isinstance(self.timestamp, (int, long)): return self.timestamp if isinstance(self.timestamp, timedelta): - # do more stuff - return self.timestamp + tmp = datetime.now() + self.timestamp + else: + tmp = self.timestamp + + epoch = datetime(1970, 1, 1, tzinfo=tmp.tzinfo) + + # do more stuff + offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 + return long(((tmp - epoch).total_seconds() - offset) * 1000) def __unicode__(self): @@ -594,7 +601,7 @@ def __unicode__(self): qs += ["USING TTL {}".format(self.ttl)] if self.timestamp: - qs += ["USING TIMESTAMP {}".format(self.timestamp)] + qs += ["USING TIMESTAMP {}".format(self.timestamp_normalized)] return ' '.join(qs) From 3f52742374930c710362dc5d5ee5226569aa15e9 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Dec 2013 17:18:32 -0800 Subject: [PATCH 0575/3726] standardizing on _timestamp across the board --- cqlengine/models.py | 7 +++++-- cqlengine/statements.py | 3 +++ cqlengine/tests/test_timestamp.py | 
18 ++++++++++++++++++ cqlengine/tests/test_ttl.py | 4 ++-- 4 files changed, 28 insertions(+), 4 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index a78678e5e2..294f1a57e4 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -261,13 +261,16 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass #__ttl__ = None # this doesn't seem to be used __consistency__ = None # can be set per query - __timestamp__ = None # optional timestamp to include with the operation (USING TIMESTAMP) __read_repair_chance__ = 0.1 + + _timestamp = None # optional timestamp to include with the operation (USING TIMESTAMP) + def __init__(self, **values): self._values = {} self._ttl = None + self._timestamp = None for name, column in self._columns.items(): value = values.get(name, None) @@ -499,7 +502,7 @@ def update(self, **values): self.__dmlquery__(self.__class__, self, batch=self._batch, ttl=self._ttl, - timestamp=self.__timestamp__, + timestamp=self._timestamp, consistency=self.__consistency__).update() #reset the value managers diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 1ffc479058..dd5042d33f 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -451,6 +451,9 @@ def timestamp_normalized(self): we're expecting self.timestamp to be either a long, a datetime, or a timedelta :return: """ + if not self.timestamp: + return None + if isinstance(self.timestamp, (int, long)): return self.timestamp diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index c58952c183..ea0c42a94c 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -30,6 +30,12 @@ class CreateWithTimestampTest(BaseTimestampTest): def test_batch(self): pass + def test_timestamp_not_included_on_normal_create(self): + with mock.patch.object(ConnectionPool, "execute") as m: + TestTimestampModel.create(count=2) + + "USING TIMESTAMP".shouldnt.be.within(m.call_args[0][0]) + def 
test_timestamp_is_set_on_model_queryset(self): delta = timedelta(seconds=30) tmp = TestTimestampModel.timestamp(delta) @@ -49,3 +55,15 @@ def test_non_batch_syntax_unit(self): "USING TIMESTAMP".should.be.within(query) +class UpdateWithTimestampTest(BaseTimestampTest): + def setUp(self): + self.instance = TestTimestampModel.create(count=1) + + def test_instance_update_includes_timestamp_in_query(self): + + with mock.patch.object(ConnectionPool, "execute") as m: + self.instance.timestamp(timedelta(seconds=30)).update(count=2) + + query = m.call_args[0][0] + + "USING TIMESTAMP".should.be.within(query) diff --git a/cqlengine/tests/test_ttl.py b/cqlengine/tests/test_ttl.py index 0d465ac09a..c08a1a19e0 100644 --- a/cqlengine/tests/test_ttl.py +++ b/cqlengine/tests/test_ttl.py @@ -80,13 +80,13 @@ def test_instance_is_returned(self): """ o = TestTTLModel.create(text="whatever") o.text = "new stuff" - o.ttl(60) + o = o.ttl(60) self.assertEqual(60, o._ttl) def test_ttl_is_include_with_query_on_update(self): o = TestTTLModel.create(text="whatever") o.text = "new stuff" - o.ttl(60) + o = o.ttl(60) with mock.patch.object(ConnectionPool, 'execute') as m: o.save() From 44baa39e54c761b60daa3c3b7312d4b1dd86e4fd Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Dec 2013 17:38:46 -0800 Subject: [PATCH 0576/3726] fixed deprecated calls, it seems that timestamp is working but broke batch --- cqlengine/query.py | 4 ++-- cqlengine/statements.py | 10 ++++++++-- cqlengine/tests/test_batch_query.py | 10 ++++++---- cqlengine/tests/test_timestamp.py | 4 +--- 4 files changed, 17 insertions(+), 11 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 9de5a745f8..d237e4a721 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -647,7 +647,7 @@ def update(self, **values): return nulled_columns = set() - us = UpdateStatement(self.column_family_name, where=self._where, ttl=self._ttl) + us = UpdateStatement(self.column_family_name, where=self._where, ttl=self._ttl, 
timestamp=self._timestamp) for name, val in values.items(): col = self.model._columns.get(name) # check for nonexistant columns @@ -746,7 +746,7 @@ def update(self): raise CQLEngineException("DML Query intance attribute is None") assert type(self.instance) == self.model - statement = UpdateStatement(self.column_family_name, ttl=self._ttl) + statement = UpdateStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp) #get defined fields and their column names for name, col in self.model._columns.items(): if not col.is_primary_key: diff --git a/cqlengine/statements.py b/cqlengine/statements.py index dd5042d33f..e004b33bd1 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -615,10 +615,16 @@ class UpdateStatement(AssignmentStatement): def __unicode__(self): qs = ['UPDATE', self.table] + using_options = [] + if self.ttl: - qs += ["USING TTL {}".format(self.ttl)] + using_options += ["TTL {}".format(self.ttl)] + if self.timestamp: - qs += ["USING TIMESTAMP {}".format(self.timestamp)] + using_options += ["TIMESTAMP {}".format(self.timestamp_normalized)] + + if using_options: + qs += ["USING {}".format(" AND ".join(using_options))] qs += ['SET'] qs += [', '.join([unicode(c) for c in self.assignments])] diff --git a/cqlengine/tests/test_batch_query.py b/cqlengine/tests/test_batch_query.py index 7ee98b7084..e55c37a20f 100644 --- a/cqlengine/tests/test_batch_query.py +++ b/cqlengine/tests/test_batch_query.py @@ -1,8 +1,10 @@ from unittest import skip from uuid import uuid4 import random +import sure + from cqlengine import Model, columns -from cqlengine.management import delete_table, create_table +from cqlengine.management import drop_table, sync_table from cqlengine.query import BatchQuery from cqlengine.tests.base import BaseCassEngTestCase @@ -17,13 +19,13 @@ class BatchQueryTests(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(BatchQueryTests, cls).setUpClass() - delete_table(TestMultiKeyModel) - 
create_table(TestMultiKeyModel) + drop_table(TestMultiKeyModel) + sync_table(TestMultiKeyModel) @classmethod def tearDownClass(cls): super(BatchQueryTests, cls).tearDownClass() - delete_table(TestMultiKeyModel) + drop_table(TestMultiKeyModel) def setUp(self): super(BatchQueryTests, self).setUp() diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index ea0c42a94c..58409b1253 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -64,6 +64,4 @@ def test_instance_update_includes_timestamp_in_query(self): with mock.patch.object(ConnectionPool, "execute") as m: self.instance.timestamp(timedelta(seconds=30)).update(count=2) - query = m.call_args[0][0] - - "USING TIMESTAMP".should.be.within(query) + "USING TIMESTAMP".should.be.within(m.call_args[0][0]) From 2d62e523f1ea638730d4153f0406e53d0f97f55a Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Dec 2013 17:51:51 -0800 Subject: [PATCH 0577/3726] working to fix batches --- cqlengine/tests/test_batch_query.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/test_batch_query.py b/cqlengine/tests/test_batch_query.py index e55c37a20f..7698ee68bd 100644 --- a/cqlengine/tests/test_batch_query.py +++ b/cqlengine/tests/test_batch_query.py @@ -1,6 +1,9 @@ from unittest import skip from uuid import uuid4 import random +from cqlengine.connection import ConnectionPool + +import mock import sure from cqlengine import Model, columns @@ -38,13 +41,26 @@ def test_insert_success_case(self): b = BatchQuery() inst = TestMultiKeyModel.batch(b).create(partition=self.pkey, cluster=2, count=3, text='4') + with self.assertRaises(TestMultiKeyModel.DoesNotExist): TestMultiKeyModel.get(partition=self.pkey, cluster=2) - b.execute() + with mock.patch.object(ConnectionPool, 'execute') as m: + b.execute() + + m.call_count.should.be(1) TestMultiKeyModel.get(partition=self.pkey, cluster=2) + def test_batch_is_executed(self): + b = 
BatchQuery() + inst = TestMultiKeyModel.batch(b).create(partition=self.pkey, cluster=2, count=3, text='4') + + with self.assertRaises(TestMultiKeyModel.DoesNotExist): + TestMultiKeyModel.get(partition=self.pkey, cluster=2) + + b.execute() + def test_update_success_case(self): inst = TestMultiKeyModel.create(partition=self.pkey, cluster=2, count=3, text='4') From 5407f41e951a4a34c483170bf6935fc9b38bdc1a Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Dec 2013 17:58:28 -0800 Subject: [PATCH 0578/3726] fixed brokenness... --- cqlengine/models.py | 6 +++--- cqlengine/tests/test_batch_query.py | 4 +--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 294f1a57e4..df528746f9 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -76,7 +76,7 @@ class TTLDescriptor(object): """ def __get__(self, instance, model): if instance: - instance = copy.deepcopy(instance) + #instance = copy.deepcopy(instance) # instance method def ttl_setter(ts): instance._ttl = ts @@ -101,7 +101,7 @@ class TimestampDescriptor(object): def __get__(self, instance, model): if instance: # instance method - instance = copy.deepcopy(instance) + #instance = copy.deepcopy(instance) def timestamp_setter(ts): instance._timestamp = ts return instance @@ -124,7 +124,7 @@ class ConsistencyDescriptor(object): """ def __get__(self, instance, model): if instance: - instance = copy.deepcopy(instance) + #instance = copy.deepcopy(instance) def consistency_setter(consistency): instance.__consistency__ = consistency return instance diff --git a/cqlengine/tests/test_batch_query.py b/cqlengine/tests/test_batch_query.py index 7698ee68bd..4beb33d9da 100644 --- a/cqlengine/tests/test_batch_query.py +++ b/cqlengine/tests/test_batch_query.py @@ -45,10 +45,8 @@ def test_insert_success_case(self): with self.assertRaises(TestMultiKeyModel.DoesNotExist): TestMultiKeyModel.get(partition=self.pkey, cluster=2) - with mock.patch.object(ConnectionPool, 
'execute') as m: - b.execute() + b.execute() - m.call_count.should.be(1) TestMultiKeyModel.get(partition=self.pkey, cluster=2) From 55cbca4127c396944093219276dc99226be69d1b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Dec 2013 12:42:29 -0800 Subject: [PATCH 0579/3726] delete is pushing timestamp in when it's specified --- cqlengine/models.py | 8 +++++++- cqlengine/query.py | 5 +++-- cqlengine/statements.py | 5 +++-- cqlengine/tests/test_timestamp.py | 23 +++++++++++++++++++++++ 4 files changed, 36 insertions(+), 5 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index df528746f9..838028f46c 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -514,7 +514,7 @@ def update(self, **values): def delete(self): """ Deletes this instance """ - self.__dmlquery__(self.__class__, self, batch=self._batch).delete() + self.__dmlquery__(self.__class__, self, batch=self._batch, timestamp=self._timestamp).delete() def get_changed_columns(self): """ returns a list of the columns that have been updated since instantiation or save """ @@ -528,9 +528,15 @@ def _inst_batch(self, batch): self._batch = batch return self + # def __deepcopy__(self): + # tmp = type(self)() + # tmp.__dict__.update(self.__dict__) + # return tmp + batch = hybrid_classmethod(_class_batch, _inst_batch) + class ModelMetaClass(type): def __new__(cls, name, bases, attrs): diff --git a/cqlengine/query.py b/cqlengine/query.py index d237e4a721..0cad904477 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -526,7 +526,8 @@ def delete(self): dq = DeleteStatement( self.column_family_name, - where=self._where + where=self._where, + timestamp=self._timestamp ) self._execute(dq) @@ -830,7 +831,7 @@ def delete(self): if self.instance is None: raise CQLEngineException("DML Query instance attribute is None") - ds = DeleteStatement(self.column_family_name) + ds = DeleteStatement(self.column_family_name, timestamp=self._timestamp) for name, col in self.model._primary_keys.items(): 
ds.add_where_clause(WhereClause( col.db_field_name, diff --git a/cqlengine/statements.py b/cqlengine/statements.py index e004b33bd1..766f590317 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -679,11 +679,12 @@ def __unicode__(self): qs += ['FROM', self.table] delete_option = [] + if self.timestamp: - delete_option += ["TIMESTAMP {}".format(self.timestamp)] + delete_option += ["TIMESTAMP {}".format(self.timestamp_normalized)] if delete_option: - qs += ["USING {}".format(" AND ".join(delete_option))] + qs += [" USING {} ".format(" AND ".join(delete_option))] if self.where_clauses: qs += [self._where] diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index 58409b1253..43583f6031 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -65,3 +65,26 @@ def test_instance_update_includes_timestamp_in_query(self): self.instance.timestamp(timedelta(seconds=30)).update(count=2) "USING TIMESTAMP".should.be.within(m.call_args[0][0]) + +class DeleteWithTimestampTest(BaseTimestampTest): + def test_non_batch(self): + """ + we don't expect the model to come back at the end because the deletion timestamp should be in the future + """ + uid = uuid4() + tmp = TestTimestampModel.create(id=uid, count=1) + + TestTimestampModel.get(id=uid).should.be.ok + + tmp.timestamp(timedelta(seconds=30)).delete() + + tmp = TestTimestampModel.create(id=uid, count=1) + + with self.assertRaises(TestTimestampModel.DoesNotExist): + TestTimestampModel.get(id=uid) + + + + + + From c393d433c1c766b0f2bb2905acd40a7272616d9f Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Dec 2013 15:50:42 -0800 Subject: [PATCH 0580/3726] fixed timezone issue. 
python must be on same TZ as cassandra when using timedelta and .timestamp --- cqlengine/statements.py | 8 ++------ cqlengine/tests/test_timestamp.py | 5 ++++- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 766f590317..0e85c9292e 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -448,7 +448,7 @@ def update_context_id(self, i): @property def timestamp_normalized(self): """ - we're expecting self.timestamp to be either a long, a datetime, or a timedelta + we're expecting self.timestamp to be either a long, int, a datetime, or a timedelta :return: """ if not self.timestamp: @@ -462,11 +462,7 @@ def timestamp_normalized(self): else: tmp = self.timestamp - epoch = datetime(1970, 1, 1, tzinfo=tmp.tzinfo) - - # do more stuff - offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 - return long(((tmp - epoch).total_seconds() - offset) * 1000) + return long(((tmp - datetime.fromtimestamp(0)).total_seconds()) * 1000000) def __unicode__(self): diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index 43583f6031..4c9901c78d 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -76,7 +76,10 @@ def test_non_batch(self): TestTimestampModel.get(id=uid).should.be.ok - tmp.timestamp(timedelta(seconds=30)).delete() + tmp.timestamp(timedelta(seconds=5)).delete() + + with self.assertRaises(TestTimestampModel.DoesNotExist): + TestTimestampModel.get(id=uid) tmp = TestTimestampModel.create(id=uid, count=1) From a327b462dd9308c01e7ca524780330d90165d6d7 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Dec 2013 16:25:50 -0800 Subject: [PATCH 0581/3726] updated changelog --- changelog | 8 ++++++++ cqlengine/tests/test_timestamp.py | 6 ++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/changelog b/changelog index 28ce74dff7..806b3c442d 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,13 
@@ CHANGELOG +0.11.0 + +* support for USING TIMESTAMP + +0.10.0 + +* support for execute_on_exception within batches + 0.9.2 * fixing create keyspace with network topology strategy * fixing regression with query expressions diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index 4c9901c78d..647673b830 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -7,7 +7,7 @@ from uuid import uuid4 import mock import sure -from cqlengine import Model, columns +from cqlengine import Model, columns, BatchQuery from cqlengine.connection import ConnectionPool from cqlengine.management import sync_table from cqlengine.tests.base import BaseCassEngTestCase @@ -25,10 +25,12 @@ def setUpClass(cls): sync_table(TestTimestampModel) + + class CreateWithTimestampTest(BaseTimestampTest): def test_batch(self): - pass + assert False def test_timestamp_not_included_on_normal_create(self): with mock.patch.object(ConnectionPool, "execute") as m: From 817e386f35c0ea69b9f4e6d99de17a42d7a3a1c5 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Dec 2013 16:26:06 -0800 Subject: [PATCH 0582/3726] changelog --- changelog | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog b/changelog index 806b3c442d..cca5fbd101 100644 --- a/changelog +++ b/changelog @@ -1,6 +1,6 @@ CHANGELOG -0.11.0 +0.11.0 (in progress) * support for USING TIMESTAMP From 6cb5a6caa3985aa3ff2ae437ad788c5b7213d5f4 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Dec 2013 17:26:08 -0800 Subject: [PATCH 0583/3726] fixed batch queries --- changelog | 2 ++ cqlengine/query.py | 16 ++++++++++++---- cqlengine/tests/test_timestamp.py | 6 ++++++ 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/changelog b/changelog index cca5fbd101..0764800fb2 100644 --- a/changelog +++ b/changelog @@ -3,6 +3,8 @@ CHANGELOG 0.11.0 (in progress) * support for USING TIMESTAMP + - allows for long, timedelta, and datetime +* fixed use of USING 
TIMESTAMP in batches 0.10.0 diff --git a/cqlengine/query.py b/cqlengine/query.py index 0cad904477..a21720fcf9 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -1,5 +1,5 @@ import copy -from datetime import datetime +from datetime import datetime, timedelta from cqlengine import BaseContainerColumn, Map, columns from cqlengine.columns import Counter, List, Set @@ -82,7 +82,7 @@ class BatchQuery(object): def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on_exception=False): self.queries = [] self.batch_type = batch_type - if timestamp is not None and not isinstance(timestamp, datetime): + if timestamp is not None and not isinstance(timestamp, (datetime, timedelta)): raise CQLEngineException('timestamp object must be an instance of datetime') self.timestamp = timestamp self._consistency = consistency @@ -103,8 +103,16 @@ def execute(self): opener = 'BEGIN ' + (self.batch_type + ' ' if self.batch_type else '') + ' BATCH' if self.timestamp: - epoch = datetime(1970, 1, 1) - ts = long((self.timestamp - epoch).total_seconds() * 1000) + + if isinstance(self.timestamp, (int, long)): + ts = self.timestamp + elif isinstance(self.timestamp, timedelta): + ts = long((datetime.now() + self.timestamp - datetime.fromtimestamp(0)).total_seconds() * 1000000) + elif isinstance(self.timestamp, datetime): + ts = long((self.timestamp - datetime.fromtimestamp(0)).total_seconds() * 1000000) + else: + raise ValueError("Batch expects a long, a timedelta, or a datetime") + opener += ' USING TIMESTAMP {}'.format(ts) query_list = [opener] diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index 647673b830..386d6db1ff 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -24,6 +24,12 @@ def setUpClass(cls): super(BaseTimestampTest, cls).setUpClass() sync_table(TestTimestampModel) +class BatchTest(BaseTimestampTest): + def test_batch_is_included(self): + with mock.patch.object(ConnectionPool, 
"execute") as m, BatchQuery(timestamp=timedelta(seconds=30)) as b: + TestTimestampModel.batch(b).create(count=1) + + "USING TIMESTAMP".should.be.within(m.call_args[0][0]) From 97b439bf9507c3f072b1b5a00f41d24102ae33fb Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 20 Dec 2013 14:37:45 -0800 Subject: [PATCH 0584/3726] timestamp working in batch mode --- cqlengine/tests/test_timestamp.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index 386d6db1ff..33c2a9c2a9 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -36,7 +36,11 @@ def test_batch_is_included(self): class CreateWithTimestampTest(BaseTimestampTest): def test_batch(self): - assert False + with mock.patch.object(ConnectionPool, "execute") as m, BatchQuery() as b: + TestTimestampModel.timestamp(timedelta(seconds=10)).batch(b).create(count=1) + + "USING TIMESTAMP".should.be.within(m.call_args[0][0]) + def test_timestamp_not_included_on_normal_create(self): with mock.patch.object(ConnectionPool, "execute") as m: From ee4f9af63400faa0753d67b23ec77f9f96316d5e Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 20 Dec 2013 15:38:46 -0800 Subject: [PATCH 0585/3726] use a regex instead of simple text match for better error catching --- cqlengine/tests/test_timestamp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index 33c2a9c2a9..0bb6a8ca90 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -39,7 +39,7 @@ def test_batch(self): with mock.patch.object(ConnectionPool, "execute") as m, BatchQuery() as b: TestTimestampModel.timestamp(timedelta(seconds=10)).batch(b).create(count=1) - "USING TIMESTAMP".should.be.within(m.call_args[0][0]) + m.call_args[0][0].should.match(r"INSERT.*USING TIMESTAMP") def test_timestamp_not_included_on_normal_create(self): From 
ea203929a4cdcdf6ec47f7d8204ab29da4e3585f Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 20 Dec 2013 15:41:52 -0800 Subject: [PATCH 0586/3726] more testing around our query --- cqlengine/tests/test_timestamp.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index 0bb6a8ca90..32bebcd252 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -39,7 +39,11 @@ def test_batch(self): with mock.patch.object(ConnectionPool, "execute") as m, BatchQuery() as b: TestTimestampModel.timestamp(timedelta(seconds=10)).batch(b).create(count=1) - m.call_args[0][0].should.match(r"INSERT.*USING TIMESTAMP") + query = m.call_args[0][0] + + query.should.match(r"INSERT.*USING TIMESTAMP") + query.should_not.match(r"TIMESTAMP.*INSERT") + def test_timestamp_not_included_on_normal_create(self): From be20a9a9f03d7b6e80ead12a0bd423d26162ff12 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 20 Dec 2013 15:57:19 -0800 Subject: [PATCH 0587/3726] checked batch updates for timestamp --- cqlengine/tests/test_timestamp.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index 32bebcd252..6aec5702fd 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -40,7 +40,7 @@ def test_batch(self): TestTimestampModel.timestamp(timedelta(seconds=10)).batch(b).create(count=1) query = m.call_args[0][0] - + query.should.match(r"INSERT.*USING TIMESTAMP") query.should_not.match(r"TIMESTAMP.*INSERT") @@ -76,12 +76,20 @@ def setUp(self): self.instance = TestTimestampModel.create(count=1) def test_instance_update_includes_timestamp_in_query(self): + # not a batch with mock.patch.object(ConnectionPool, "execute") as m: self.instance.timestamp(timedelta(seconds=30)).update(count=2) "USING TIMESTAMP".should.be.within(m.call_args[0][0]) + def 
test_instance_update_in_batch(self): + with mock.patch.object(ConnectionPool, "execute") as m, BatchQuery() as b: + self.instance.batch(b).timestamp(timedelta(seconds=30)).update(count=2) + + query = m.call_args[0][0] + "USING TIMESTAMP".should.be.within(query) + class DeleteWithTimestampTest(BaseTimestampTest): def test_non_batch(self): """ From ba22b4c9b62618f83e952b0bf60615b8f8dba426 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 20 Dec 2013 16:26:25 -0800 Subject: [PATCH 0588/3726] blind deletes function properly --- cqlengine/query.py | 5 +++++ cqlengine/tests/test_timestamp.py | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/cqlengine/query.py b/cqlengine/query.py index a21720fcf9..f991f5e9cb 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -650,6 +650,11 @@ def ttl(self, ttl): clone._ttl = ttl return clone + def timestamp(self, timestamp): + clone = copy.deepcopy(self) + clone._timestamp = timestamp + return clone + def update(self, **values): """ Updates the rows in this queryset """ if not values: diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index 6aec5702fd..48ff93d5be 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -111,6 +111,24 @@ def test_non_batch(self): TestTimestampModel.get(id=uid) + def test_blind_delete(self): + """ + we don't expect the model to come back at the end because the deletion timestamp should be in the future + """ + uid = uuid4() + tmp = TestTimestampModel.create(id=uid, count=1) + + TestTimestampModel.get(id=uid).should.be.ok + + TestTimestampModel.objects(id=uid).timestamp(timedelta(seconds=5)).delete() + + with self.assertRaises(TestTimestampModel.DoesNotExist): + TestTimestampModel.get(id=uid) + + tmp = TestTimestampModel.create(id=uid, count=1) + + with self.assertRaises(TestTimestampModel.DoesNotExist): + TestTimestampModel.get(id=uid) From c0eb900ede9e6859e888fe137534834b63740e04 Mon Sep 17 00:00:00 2001 
From: Jon Haddad Date: Fri, 20 Dec 2013 16:38:27 -0800 Subject: [PATCH 0589/3726] updated docs with timestamp information, test that timestamps in the past work properly --- cqlengine/tests/test_timestamp.py | 35 ++++++++++++++++++++++++++++++- docs/topics/models.rst | 4 ++++ docs/topics/queryset.rst | 3 +++ 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index 48ff93d5be..33ec3624ef 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -1,7 +1,7 @@ """ Tests surrounding the blah.timestamp( timedelta(seconds=30) ) format. """ -from datetime import timedelta +from datetime import timedelta, datetime import unittest from uuid import uuid4 @@ -130,6 +130,39 @@ def test_blind_delete(self): with self.assertRaises(TestTimestampModel.DoesNotExist): TestTimestampModel.get(id=uid) + def test_blind_delete_with_datetime(self): + """ + we don't expect the model to come back at the end because the deletion timestamp should be in the future + """ + uid = uuid4() + tmp = TestTimestampModel.create(id=uid, count=1) + + TestTimestampModel.get(id=uid).should.be.ok + + plus_five_seconds = datetime.now() + timedelta(seconds=5) + + TestTimestampModel.objects(id=uid).timestamp(plus_five_seconds).delete() + + with self.assertRaises(TestTimestampModel.DoesNotExist): + TestTimestampModel.get(id=uid) + + tmp = TestTimestampModel.create(id=uid, count=1) + + with self.assertRaises(TestTimestampModel.DoesNotExist): + TestTimestampModel.get(id=uid) + + def test_delete_in_the_past(self): + uid = uuid4() + tmp = TestTimestampModel.create(id=uid, count=1) + + TestTimestampModel.get(id=uid).should.be.ok + + # delete the in past, should not affect the object created above + TestTimestampModel.objects(id=uid).timestamp(timedelta(seconds=-60)).delete() + + TestTimestampModel.get(id=uid) + + diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 1fae6070d4..b34d7f5332 
100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -141,6 +141,10 @@ Model Methods Sets the batch object to run instance updates and inserts queries with. + .. method:: timestamp(timedelta_or_datetime) + + Sets the timestamp for the query + .. method:: ttl(ttl_in_sec) Sets the ttl values to run instance updates and inserts queries with. diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index 4cf0d20488..569fb58103 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -377,6 +377,9 @@ QuerySet method reference Enables the (usually) unwise practive of querying on a clustering key without also defining a partition key + .. method:: timestamp(timestamp_or_long_or_datetime) + + Allows for custom timestamps to be saved with the record. .. method:: ttl(ttl_in_seconds) From b541d3a420983c4103f82508808189b99bda92ca Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Mon, 30 Dec 2013 14:58:58 -0800 Subject: [PATCH 0590/3726] making connection test imports play nice with nose --- cqlengine/tests/connections/test_connection_setup.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/cqlengine/tests/connections/test_connection_setup.py b/cqlengine/tests/connections/test_connection_setup.py index 5f1f7b774c..e07820f93c 100644 --- a/cqlengine/tests/connections/test_connection_setup.py +++ b/cqlengine/tests/connections/test_connection_setup.py @@ -1,20 +1,22 @@ from unittest import TestCase -from mock import MagicMock, patch, Mock +from mock import patch -from cqlengine.connection import setup, CQLConnectionError, Host +from cqlengine.connection import setup as setup_connection +from cqlengine.connection import CQLConnectionError, Host class OperationalErrorLoggingTest(TestCase): + @patch('cqlengine.connection.ConnectionPool', return_value=None, autospec=True) def test_setup_hosts(self, PatchedConnectionPool): with self.assertRaises(CQLConnectionError): - setup(hosts=['localhost:abcd']) + 
setup_connection(hosts=['localhost:abcd']) self.assertEqual(len(PatchedConnectionPool.mock_calls), 0) with self.assertRaises(CQLConnectionError): - setup(hosts=['localhost:9160:abcd']) + setup_connection(hosts=['localhost:9160:abcd']) self.assertEqual(len(PatchedConnectionPool.mock_calls), 0) - setup(hosts=['localhost:9161', 'remotehost']) + setup_connection(hosts=['localhost:9161', 'remotehost']) self.assertEqual(len(PatchedConnectionPool.mock_calls), 1) self.assertEqual(PatchedConnectionPool.call_args[0][0], [Host('localhost', 9161), Host('remotehost', 9160)]) From c28ec3fab47b41c0580f5a5b7844d2afe5b68d39 Mon Sep 17 00:00:00 2001 From: Mike Hall Date: Thu, 9 Jan 2014 14:44:53 -0800 Subject: [PATCH 0591/3726] accept uuid strings with or without dashes before converting to python UUID type --- cqlengine/columns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 11a9f60dcf..4ba74273b1 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -372,7 +372,7 @@ class UUID(Column): """ db_type = 'uuid' - re_uuid = re.compile(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}') + re_uuid = re.compile(r'[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}') def validate(self, value): val = super(UUID, self).validate(value) From fb73405e58d14716f2d37b51fe3ea1d41f7cda68 Mon Sep 17 00:00:00 2001 From: Mike Hall Date: Fri, 10 Jan 2014 08:53:32 -0800 Subject: [PATCH 0592/3726] added test for uuids without dashes --- cqlengine/tests/columns/test_validation.py | 27 ++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 61020e27a5..4232b4003c 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -162,6 +162,33 @@ def test_decimal_io(self): dt2 = self.DecimalTest.objects(test_id=0).first() assert dt2.dec_val == D('5') +class 
TestUUID(BaseCassEngTestCase): + class UUIDTest(Model): + test_id = Integer(primary_key=True) + a_uuid = UUID(default=uuid4()) + + @classmethod + def setUpClass(cls): + super(TestUUID, cls).setUpClass() + create_table(cls.UUIDTest) + + @classmethod + def tearDownClass(cls): + super(TestUUID, cls).tearDownClass() + delete_table(cls.UUIDTest) + + def test_uuid_str_with_dashes(self): + a_uuid = uuid4() + t0 = self.UUIDTest.create(test_id=0, a_uuid=str(a_uuid)) + t1 = self.UUIDTest.get(test_id=0) + assert a_uuid == t1.a_uuid + + def test_uuid_str_no_dashes(self): + a_uuid = uuid4() + t0 = self.UUIDTest.create(test_id=1, a_uuid=a_uuid.hex) + t1 = self.UUIDTest.get(test_id=1) + assert a_uuid == t1.a_uuid + class TestTimeUUID(BaseCassEngTestCase): class TimeUUIDTest(Model): test_id = Integer(primary_key=True) From cf369e848e6d7e687a22823b66740633f58a3de4 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 14 Jan 2014 12:18:39 -0800 Subject: [PATCH 0593/3726] reset ttl and timestamp after save / update --- cqlengine/models.py | 10 ++++++---- cqlengine/tests/test_timestamp.py | 15 +++++++++++++++ 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 838028f46c..4e170d2dd0 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -475,6 +475,9 @@ def save(self): v.reset_previous_value() self._is_persisted = True + self._ttl = None + self._timestamp = None + return self def update(self, **values): @@ -510,6 +513,9 @@ def update(self, **values): v.reset_previous_value() self._is_persisted = True + self._ttl = None + self._timestamp = None + return self def delete(self): @@ -528,10 +534,6 @@ def _inst_batch(self, batch): self._batch = batch return self - # def __deepcopy__(self): - # tmp = type(self)() - # tmp.__dict__.update(self.__dict__) - # return tmp batch = hybrid_classmethod(_class_batch, _inst_batch) diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index 
33ec3624ef..a5d9b870cc 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -110,6 +110,21 @@ def test_non_batch(self): with self.assertRaises(TestTimestampModel.DoesNotExist): TestTimestampModel.get(id=uid) + # calling .timestamp sets the TS on the model + tmp.timestamp(timedelta(seconds=5)) + tmp._timestamp.should.be.ok + + # calling save clears the set timestamp + tmp.save() + tmp._timestamp.shouldnt.be.ok + + tmp.timestamp(timedelta(seconds=5)) + tmp.update() + tmp._timestamp.shouldnt.be.ok + + + + def test_blind_delete(self): """ From 1a0325443dda9ae94e9ee681d420375faafd84bc Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 14 Jan 2014 12:22:02 -0800 Subject: [PATCH 0594/3726] changelog update --- changelog | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/changelog b/changelog index 0764800fb2..c9e9b0ff51 100644 --- a/changelog +++ b/changelog @@ -2,9 +2,10 @@ CHANGELOG 0.11.0 (in progress) -* support for USING TIMESTAMP +* support for USING TIMESTAMP via a .timestamp(timedelta(seconds=30)) syntax - allows for long, timedelta, and datetime * fixed use of USING TIMESTAMP in batches +* clear TTL and timestamp off models after persisting to DB 0.10.0 From 516b16ee5610a32f9926e2d99bebb464f0104e6f Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 15 Jan 2014 12:37:57 -0800 Subject: [PATCH 0595/3726] simplified return of the timestamp on the class --- cqlengine/models.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 4e170d2dd0..af5da3ffe1 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -1,6 +1,5 @@ from collections import OrderedDict import re -import copy from cqlengine import columns from cqlengine.exceptions import ModelException, CQLEngineException, ValidationError from cqlengine.query import ModelQuerySet, DMLQuery, AbstractQueryableColumn @@ -101,19 +100,13 @@ class TimestampDescriptor(object): def 
__get__(self, instance, model): if instance: # instance method - #instance = copy.deepcopy(instance) def timestamp_setter(ts): instance._timestamp = ts return instance return timestamp_setter - qs = model.__queryset__(model) - - def timestamp_setter(ts): - qs._timestamp = ts - return qs + return model.objects.timestamp - return timestamp_setter def __call__(self, *args, **kwargs): raise NotImplementedError From 19777eb68e75bab674ccc4e75d6aef2837ef821a Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 15 Jan 2014 12:42:47 -0800 Subject: [PATCH 0596/3726] release process --- RELEASE.txt | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 RELEASE.txt diff --git a/RELEASE.txt b/RELEASE.txt new file mode 100644 index 0000000000..c4def8ddce --- /dev/null +++ b/RELEASE.txt @@ -0,0 +1,7 @@ +Check changelog +Ensure docs are updated +Tests pass +Update VERSION +Push tag to github +Push release to pypi + From 701099e69db492c081309f3c27243efdf1409136 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 15 Jan 2014 13:47:32 -0800 Subject: [PATCH 0597/3726] updated changelog --- changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog b/changelog index c9e9b0ff51..71d3dd469f 100644 --- a/changelog +++ b/changelog @@ -6,6 +6,7 @@ CHANGELOG - allows for long, timedelta, and datetime * fixed use of USING TIMESTAMP in batches * clear TTL and timestamp off models after persisting to DB +* allows UUID without - (Thanks to Michael Haddad, github.com/mahall) 0.10.0 From 5a0f945294a1b7120a745727951ed25f52c78e2f Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 15 Jan 2014 15:53:07 -0800 Subject: [PATCH 0598/3726] updated changelog with note for dokai's fix to table settings --- changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog b/changelog index 71d3dd469f..a908f72092 100644 --- a/changelog +++ b/changelog @@ -7,6 +7,7 @@ CHANGELOG * fixed use of USING TIMESTAMP in batches * clear TTL and timestamp off models after persisting to DB * 
allows UUID without - (Thanks to Michael Haddad, github.com/mahall) +* fixes regarding syncing schema settings (thanks Kai Lautaportti github.com/dokai) 0.10.0 From 83d26c2f8d4381fb81ea4c3541f02fe21bc4b734 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 15 Jan 2014 15:58:26 -0800 Subject: [PATCH 0599/3726] test around delete consistency #148 --- cqlengine/models.py | 2 +- cqlengine/tests/test_consistency.py | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 57a9f2df24..4f70c3eed3 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -513,7 +513,7 @@ def update(self, **values): def delete(self): """ Deletes this instance """ - self.__dmlquery__(self.__class__, self, batch=self._batch, timestamp=self._timestamp).delete() + self.__dmlquery__(self.__class__, self, batch=self._batch, timestamp=self._timestamp, consistency=self.__consistency__).delete() def get_changed_columns(self): """ returns a list of the columns that have been updated since instantiation or save """ diff --git a/cqlengine/tests/test_consistency.py b/cqlengine/tests/test_consistency.py index 94a236aaf8..2f54ae243c 100644 --- a/cqlengine/tests/test_consistency.py +++ b/cqlengine/tests/test_consistency.py @@ -76,3 +76,19 @@ def test_blind_update(self): args = m.call_args self.assertEqual(ALL, args[0][2]) + + + def test_delete(self): + # ensures we always carry consistency through on delete statements + t = TestConsistencyModel.create(text="bacon and eggs") + t.text = "ham and cheese sandwich" + uid = t.id + + with mock.patch.object(ConnectionPool, 'execute') as m: + t.consistency(ALL).delete() + + with mock.patch.object(ConnectionPool, 'execute') as m: + TestConsistencyModel.objects(id=uid).consistency(ALL).delete() + + args = m.call_args + self.assertEqual(ALL, args[0][2]) From e4e4bb7d322c5ad946a7c5e9b89b1630b14903c0 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 15 Jan 2014 17:30:49 -0800 Subject: 
[PATCH 0600/3726] version bump --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 78bc1abd14..d9df1bbc0c 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.10.0 +0.11.0 From ac2dd3aa5bed561e0ca708466d2a991207c83b54 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Wed, 22 Jan 2014 11:53:34 +0100 Subject: [PATCH 0601/3726] Use validation method to normalize Boolean values. --- cqlengine/columns.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 4ba74273b1..f899f92af6 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -442,11 +442,15 @@ class Quoter(ValueQuoter): def __str__(self): return 'true' if self.value else 'false' - def to_python(self, value): + def validate(self, value): + """ Always returns a Python boolean. """ return bool(value) + def to_python(self, value): + return self.validate(value) + def to_database(self, value): - return self.Quoter(bool(value)) + return self.Quoter(self.validate(value)) class Float(Column): From 3d2268c6e8f94d7aa19699efb205643661ddb547 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Wed, 22 Jan 2014 11:54:27 +0100 Subject: [PATCH 0602/3726] Unquote boolean values when normalizing. --- cqlengine/columns.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index f899f92af6..2452a91c9c 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -444,6 +444,8 @@ def __str__(self): def validate(self, value): """ Always returns a Python boolean. """ + if isinstance(value, self.Quoter): + value = value.value return bool(value) def to_python(self, value): From b768b6e6b86f7e790f136c5fa83a9b29352a9b0f Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Wed, 22 Jan 2014 11:56:57 +0100 Subject: [PATCH 0603/3726] Update changelog. 
--- changelog | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/changelog b/changelog index a908f72092..1284fa6015 100644 --- a/changelog +++ b/changelog @@ -1,6 +1,10 @@ CHANGELOG -0.11.0 (in progress) +0.11.1 (in progress) + +* Normalize and unquote boolean values. + +0.11.0 * support for USING TIMESTAMP via a .timestamp(timedelta(seconds=30)) syntax - allows for long, timedelta, and datetime From af8662d87efd37b06f0da0b30b06eeeb03711f9c Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Wed, 22 Jan 2014 11:57:51 +0100 Subject: [PATCH 0604/3726] Add myself to the list of contributors. --- AUTHORS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/AUTHORS b/AUTHORS index a220cb26d6..70cdaeaf80 100644 --- a/AUTHORS +++ b/AUTHORS @@ -6,5 +6,5 @@ Jon Haddad CONTRIBUTORS Eric Scrivner - test environment, connection pooling - +Kevin Deldycke From 70d866018d3fc56e960d0f7746a5a6d33f8aadd8 Mon Sep 17 00:00:00 2001 From: drewlll2ll Date: Wed, 22 Jan 2014 11:08:16 -0500 Subject: [PATCH 0605/3726] Fixed incompatibilities w/ cassandra 2.0 --- cqlengine/columns.py | 14 ++++++++++++-- cqlengine/statements.py | 19 +++++++++++++++++++ 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 11a9f60dcf..2ee93ecd0a 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -5,6 +5,7 @@ import re from uuid import uuid1, uuid4 from cql.query import cql_quote +from cql.cqltypes import DateType from cqlengine.exceptions import ValidationError @@ -209,7 +210,11 @@ class Bytes(Column): def to_database(self, value): val = super(Bytes, self).to_database(value) if val is None: return - return val.encode('hex') + return '0x' + val.encode('hex') + + def to_python(self, value): + #return value[2:].decode('hex') + return value class Ascii(Column): @@ -326,7 +331,10 @@ def to_python(self, value): return value elif isinstance(value, date): return datetime(*(value.timetuple()[:6])) - return 
datetime.utcfromtimestamp(value) + try: + return datetime.utcfromtimestamp(value) + except TypeError: + return datetime.utcfromtimestamp(DateType.deserialize(value)) def to_database(self, value): value = super(DateTime, self).to_database(value) @@ -352,6 +360,8 @@ def to_python(self, value): return value.date() elif isinstance(value, date): return value + else: + value = DateType.deserialize(value) return datetime.utcfromtimestamp(value).date() diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 45f3fabead..014939ba86 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -5,6 +5,25 @@ class StatementException(Exception): pass +# Monkey patch cql_quote to allow raw hex values +import cql.query + + +def cql_quote_replacement(term): + if isinstance(term, basestring) and term.startswith('0x'): + if isinstance(term, unicode): + return term.encode('utf8') + else: + return term + elif isinstance(term, unicode): + return "'%s'" % cql.query.__escape_quotes(term.encode('utf8')) + elif isinstance(term, (str, bool)): + return "'%s'" % cql.query.__escape_quotes(str(term)) + else: + return str(term) +cql.query.cql_quote = cql_quote_replacement + + class ValueQuoter(object): def __init__(self, value): From c533ae61996447fdc035c9a9561a3a26e546a3ed Mon Sep 17 00:00:00 2001 From: drewlll2ll Date: Wed, 22 Jan 2014 11:32:59 -0500 Subject: [PATCH 0606/3726] Fixed Date column --- cqlengine/columns.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 2ee93ecd0a..7365d2b21a 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -353,17 +353,16 @@ def to_database(self, value): class Date(Column): db_type = 'timestamp' - def to_python(self, value): if value is None: return if isinstance(value, datetime): return value.date() elif isinstance(value, date): return value - else: - value = DateType.deserialize(value) - - return datetime.utcfromtimestamp(value).date() + try: + 
return datetime.utcfromtimestamp(value).date() + except TypeError: + return datetime.utcfromtimestamp(DateType.deserialize(value)).date() def to_database(self, value): value = super(Date, self).to_database(value) From a2a532aa5aac0c6156d14eed64afedfec082d1aa Mon Sep 17 00:00:00 2001 From: drewlll2ll Date: Wed, 22 Jan 2014 12:11:03 -0500 Subject: [PATCH 0607/3726] Updated get_fields to be 2.0 compatible --- cqlengine/management.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 6d41489e6f..0eb3bdf390 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -228,13 +228,19 @@ def get_fields(model): col_family = model.column_family_name(include_keyspace=False) with connection_manager() as con: - query = "SELECT column_name, validator FROM system.schema_columns \ + query = "SELECT * FROM system.schema_columns \ WHERE keyspace_name = :ks_name AND columnfamily_name = :col_family" logger.debug("get_fields %s %s", ks_name, col_family) tmp = con.execute(query, {'ks_name': ks_name, 'col_family': col_family}, ONE) - return [Field(x[0], x[1]) for x in tmp.results] + + column_indices = [tmp.columns.index('column_name'), tmp.columns.index('validator')] + try: + type_index = tmp.columns.index('type') + return [Field(x[column_indices[0]], x[column_indices[1]]) for x in tmp.results if x[type_index] == 'regular'] + except ValueError: + return [Field(x[column_indices[0]], x[column_indices[1]]) for x in tmp.results] # convert to Field named tuples From 74bc38e7e73f3a4a03203052c1fc0b5dc2f3f8d3 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Thu, 23 Jan 2014 10:46:49 +0100 Subject: [PATCH 0608/3726] Unit test boolean value quoting. 
--- cqlengine/tests/columns/test_value_io.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/cqlengine/tests/columns/test_value_io.py b/cqlengine/tests/columns/test_value_io.py index 54d8f5d5c5..c4958f40dd 100644 --- a/cqlengine/tests/columns/test_value_io.py +++ b/cqlengine/tests/columns/test_value_io.py @@ -144,6 +144,19 @@ class TestBooleanIO(BaseColumnIOTest): pkey_val = True data_val = False + def comparator_converter(self, val): + return val.value if isinstance(val, columns.Boolean.Quoter) else val + +class TestBooleanQuoter(BaseColumnIOTest): + + column = columns.Boolean + + pkey_val = True + data_val = columns.Boolean.Quoter(False) + + def comparator_converter(self, val): + return val.value if isinstance(val, columns.Boolean.Quoter) else val + class TestFloatIO(BaseColumnIOTest): column = columns.Float From cda56c117ba77977b9d2f14ba7ca85229445565d Mon Sep 17 00:00:00 2001 From: Roey Berman Date: Mon, 27 Jan 2014 18:54:43 +0200 Subject: [PATCH 0609/3726] fix (rare) race condition in ConnectionPool.get() --- cqlengine/connection.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 01b94023fa..6872bb8e97 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -3,7 +3,11 @@ #http://cassandra.apache.org/doc/cql/CQL.html from collections import namedtuple -import Queue +try: + import Queue as queue +except ImportError: + # python 3 + import queue import random import cql @@ -118,7 +122,7 @@ def __init__( self._consistency = consistency self._timeout = timeout - self._queue = Queue.Queue(maxsize=_max_connections) + self._queue = queue.Queue(maxsize=_max_connections) def clear(self): """ @@ -138,11 +142,14 @@ def get(self): a new one. 
""" try: - if self._queue.empty(): - return self._create_connection() + # get with blocking=False (default) returns an item if one + # is immediately available, else raises the Empty exception return self._queue.get() - except CQLConnectionError as cqle: - raise cqle + except queue.Empty: + try: + return self._create_connection() + except CQLConnectionError as cqle: + raise cqle def put(self, conn): """ From 1b0cbe27d59e1869b8de410de5b52ca06620db15 Mon Sep 17 00:00:00 2001 From: Roey Berman Date: Sun, 2 Feb 2014 13:46:25 +0200 Subject: [PATCH 0610/3726] connection queue - issue non-blocking get to pool queue --- cqlengine/connection.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 6872bb8e97..2eb2d30ef9 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -142,9 +142,9 @@ def get(self): a new one. """ try: - # get with blocking=False (default) returns an item if one + # get with block=False returns an item if one # is immediately available, else raises the Empty exception - return self._queue.get() + return self._queue.get(block=False) except queue.Empty: try: return self._create_connection() From 79fc15ecd825af9a3884c254407065d32b17c296 Mon Sep 17 00:00:00 2001 From: Travis Glines Date: Thu, 6 Feb 2014 16:04:02 -0800 Subject: [PATCH 0611/3726] Added test to verify limit immutability and deep copy in queryset --- cqlengine/tests/query/test_queryset.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 97e6033551..fbe2affdcc 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -127,6 +127,20 @@ def test_queryset_is_immutable(self): assert len(query2._where) == 2 assert len(query1._where) == 1 + def test_queryset_limit_immutability(self): + """ + Tests that calling a queryset function that changes it's state returns a new 
queryset with same limit + """ + query1 = TestModel.objects(test_id=5).limit(1) + assert query1._limit == 1 + + query2 = query1.filter(expected_result__gte=1) + assert query2._limit == 1 + + query3 = query1.filter(expected_result__gte=1).limit(2) + assert query1._limit == 1 + assert query3._limit == 2 + def test_the_all_method_duplicates_queryset(self): """ Tests that calling all on a queryset with previously defined filters duplicates queryset From 3cf722a963652dc4f6ac45456521b9e133f95a49 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Fri, 7 Feb 2014 16:27:22 +0100 Subject: [PATCH 0612/3726] Allow acces to instance columns as if it is a dict. --- changelog | 1 + cqlengine/models.py | 31 ++++++++++++++++++++++++++ cqlengine/tests/model/test_model_io.py | 21 +++++++++++++++++ 3 files changed, 53 insertions(+) diff --git a/changelog b/changelog index a908f72092..8f14d773cf 100644 --- a/changelog +++ b/changelog @@ -8,6 +8,7 @@ CHANGELOG * clear TTL and timestamp off models after persisting to DB * allows UUID without - (Thanks to Michael Haddad, github.com/mahall) * fixes regarding syncing schema settings (thanks Kai Lautaportti github.com/dokai) +* allow acces to instance columns as if it is a dict 0.10.0 diff --git a/cqlengine/models.py b/cqlengine/models.py index 4f70c3eed3..27f0316bb2 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -421,6 +421,37 @@ def validate(self): val = col.validate(getattr(self, name)) setattr(self, name, val) + ### Let an instance be used like a dict of its columns keys/values + + def __iter__(self): + """ Iterate over column ids. """ + for column_id in self._columns.keys(): + yield column_id + + def __getitem__(self, key): + """ Returns column's value. """ + if not isinstance(key, basestring): + raise TypeError + if key not in self._columns.keys(): + raise KeyError + return getattr(self, key) + + def __len__(self): + """ Returns the number of columns defined on that model. 
""" + return len(self._columns.keys()) + + def keys(self): + """ Returns list of column's IDs. """ + return [k for k in self] + + def values(self): + """ Returns list of column's values. """ + return [self[k] for k in self] + + def items(self): + """ Returns a dictionnary of columns's IDs/values. """ + return [(k, self[k]) for k in self] + def _as_dict(self): """ Returns a map of column names to cleaned values """ values = self._dynamic_columns or {} diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index 82abef74ba..512aa2324f 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -1,6 +1,7 @@ from uuid import uuid4 import random from datetime import date +from operator import itemgetter from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.management import create_table @@ -43,6 +44,26 @@ def test_model_save_and_load(self): for cname in tm._columns.keys(): self.assertEquals(getattr(tm, cname), getattr(tm2, cname)) + def test_model_read_as_dict(self): + """ + Tests that columns of an instance can be read as a dict. + """ + tm = TestModel.create(count=8, text='123456789', a_bool=True) + column_dict = { + 'id': tm.id, + 'count': tm.count, + 'text': tm.text, + 'a_bool': tm.a_bool, + } + self.assertEquals(sorted(tm.keys()), sorted(column_dict.keys())) + self.assertEquals(sorted(tm.values()), sorted(column_dict.values())) + self.assertEquals( + sorted(tm.items(), key=itemgetter(0)), + sorted(column_dict.items(), key=itemgetter(0))) + self.assertEquals(len(tm), len(column_dict)) + for column_id in column_dict.keys(): + self.assertEqual(tm[column_id], column_dict[column_id]) + def test_model_updating_works_properly(self): """ Tests that subsequent saves after initial model creation work From ef5745bc5f85f613a663ed1d2102320e1f0acd1b Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Fri, 7 Feb 2014 16:31:39 +0100 Subject: [PATCH 0613/3726] Fix comment. 
--- cqlengine/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 27f0316bb2..5655c9df54 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -449,7 +449,7 @@ def values(self): return [self[k] for k in self] def items(self): - """ Returns a dictionnary of columns's IDs/values. """ + """ Returns a list of columns's IDs/values. """ return [(k, self[k]) for k in self] def _as_dict(self): From 95d99bf35982a75df961522367a0122effec4f24 Mon Sep 17 00:00:00 2001 From: Danny Cosson Date: Sun, 9 Feb 2014 14:39:26 -0500 Subject: [PATCH 0614/3726] Implements the ability to blind update add to a set. --- cqlengine/query.py | 20 +++++++++++++++----- cqlengine/statements.py | 9 ++++++--- cqlengine/tests/query/test_updates.py | 12 +++++++++++- 3 files changed, 32 insertions(+), 9 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index f991f5e9cb..f226a70ed4 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -663,24 +663,34 @@ def update(self, **values): nulled_columns = set() us = UpdateStatement(self.column_family_name, where=self._where, ttl=self._ttl, timestamp=self._timestamp) for name, val in values.items(): - col = self.model._columns.get(name) + col_name, col_op = self._parse_filter_arg(name) + col = self.model._columns.get(col_name) # check for nonexistant columns if col is None: - raise ValidationError("{}.{} has no column named: {}".format(self.__module__, self.model.__name__, name)) + raise ValidationError("{}.{} has no column named: {}".format(self.__module__, self.model.__name__, col_name)) # check for primary key update attempts if col.is_primary_key: - raise ValidationError("Cannot apply update to primary key '{}' for {}.{}".format(name, self.__module__, self.model.__name__)) + raise ValidationError("Cannot apply update to primary key '{}' for {}.{}".format(col_name, self.__module__, self.model.__name__)) val = col.validate(val) if val is None: - 
nulled_columns.add(name) + nulled_columns.add(col_name) continue + # add the update statements if isinstance(col, Counter): # TODO: implement counter updates raise NotImplementedError + elif isinstance(col, BaseContainerColumn): + if isinstance(col, List): klass = ListUpdateClause + elif isinstance(col, Map): klass = MapUpdateClause + elif isinstance(col, Set): klass = SetUpdateClause + else: raise RuntimeError + us.add_assignment_clause(klass( + col_name, col.to_database(val), operation=col_op)) else: - us.add_assignment_clause(AssignmentClause(name, col.to_database(val))) + us.add_assignment_clause(AssignmentClause( + col_name, col.to_database(val))) if us.assignments: self._execute(us) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 0e85c9292e..ba4d7061c1 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -136,10 +136,11 @@ def insert_tuple(self): class ContainerUpdateClause(AssignmentClause): - def __init__(self, field, value, previous=None, column=None): + def __init__(self, field, value, operation=None, previous=None, column=None): super(ContainerUpdateClause, self).__init__(field, value) self.previous = previous self._assignments = None + self._operation = operation self._analyzed = False self._column = column @@ -159,8 +160,8 @@ def update_context(self, ctx): class SetUpdateClause(ContainerUpdateClause): """ updates a set collection """ - def __init__(self, field, value, previous=None, column=None): - super(SetUpdateClause, self).__init__(field, value, previous, column=column) + def __init__(self, field, value, operation=None, previous=None, column=None): + super(SetUpdateClause, self).__init__(field, value, operation, previous, column=column) self._additions = None self._removals = None @@ -182,6 +183,8 @@ def _analyze(self): """ works out the updates to be performed """ if self.value is None or self.value == self.previous: pass + elif self._operation == "add": + self._additions = self.value elif self.previous is 
None: self._assignments = self.value else: diff --git a/cqlengine/tests/query/test_updates.py b/cqlengine/tests/query/test_updates.py index b54b294c60..13f2be039c 100644 --- a/cqlengine/tests/query/test_updates.py +++ b/cqlengine/tests/query/test_updates.py @@ -13,7 +13,7 @@ class TestQueryUpdateModel(Model): cluster = columns.Integer(primary_key=True) count = columns.Integer(required=False) text = columns.Text(required=False, index=True) - + text_set = columns.Set(columns.Text, required=False) class QueryUpdateTests(BaseCassEngTestCase): @@ -115,3 +115,13 @@ def test_mixed_value_and_null_update(self): def test_counter_updates(self): pass + + def test_set_add_updates(self): + partition = uuid4() + cluster = 1 + TestQueryUpdateModel.objects.create( + partition=partition, cluster=cluster, text_set={"foo"}) + TestQueryUpdateModel.objects( + partition=partition, cluster=cluster).update(text_set__add={'bar'}) + obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) + self.assertEqual(obj.text_set, {"foo", "bar"}) From b777f8cfc6977b59cccf6bc8cc7e78a8a5b0e357 Mon Sep 17 00:00:00 2001 From: Danny Cosson Date: Sun, 9 Feb 2014 14:42:39 -0500 Subject: [PATCH 0615/3726] Adds a test for a blind update append when the record doesn't exist yet --- cqlengine/tests/query/test_updates.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/cqlengine/tests/query/test_updates.py b/cqlengine/tests/query/test_updates.py index 13f2be039c..36b229dfc7 100644 --- a/cqlengine/tests/query/test_updates.py +++ b/cqlengine/tests/query/test_updates.py @@ -125,3 +125,13 @@ def test_set_add_updates(self): partition=partition, cluster=cluster).update(text_set__add={'bar'}) obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_set, {"foo", "bar"}) + + def test_set_add_updates_new_record(self): + """ If the key doesn't exist yet, an update creates the record + """ + partition = uuid4() + cluster = 1 + 
TestQueryUpdateModel.objects( + partition=partition, cluster=cluster).update(text_set__add={'bar'}) + obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) + self.assertEqual(obj.text_set, {"bar"}) From 47b1d2e3cf3619dc86342bc68c73080bbc226ea5 Mon Sep 17 00:00:00 2001 From: Danny Cosson Date: Sun, 9 Feb 2014 14:45:27 -0500 Subject: [PATCH 0616/3726] Adds blind set removal, and a test that it works --- cqlengine/statements.py | 2 ++ cqlengine/tests/query/test_updates.py | 11 +++++++++++ 2 files changed, 13 insertions(+) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index ba4d7061c1..111574c167 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -185,6 +185,8 @@ def _analyze(self): pass elif self._operation == "add": self._additions = self.value + elif self._operation == "remove": + self._removals = self.value elif self.previous is None: self._assignments = self.value else: diff --git a/cqlengine/tests/query/test_updates.py b/cqlengine/tests/query/test_updates.py index 36b229dfc7..fc1fe0e171 100644 --- a/cqlengine/tests/query/test_updates.py +++ b/cqlengine/tests/query/test_updates.py @@ -135,3 +135,14 @@ def test_set_add_updates_new_record(self): partition=partition, cluster=cluster).update(text_set__add={'bar'}) obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_set, {"bar"}) + + def test_set_remove_updates(self): + partition = uuid4() + cluster = 1 + TestQueryUpdateModel.objects.create( + partition=partition, cluster=cluster, text_set={"foo", "baz"}) + TestQueryUpdateModel.objects( + partition=partition, cluster=cluster).update( + text_set__remove={'foo'}) + obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) + self.assertEqual(obj.text_set, {"baz"}) From cc52ad6c6652b2f52a11fcdc075e0f3df27f927f Mon Sep 17 00:00:00 2001 From: Danny Cosson Date: Sun, 9 Feb 2014 14:47:42 -0500 Subject: [PATCH 0617/3726] Adds a test that removing 
something that's not in a set just fails quietly --- cqlengine/tests/query/test_updates.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/cqlengine/tests/query/test_updates.py b/cqlengine/tests/query/test_updates.py index fc1fe0e171..65f8f57211 100644 --- a/cqlengine/tests/query/test_updates.py +++ b/cqlengine/tests/query/test_updates.py @@ -146,3 +146,16 @@ def test_set_remove_updates(self): text_set__remove={'foo'}) obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_set, {"baz"}) + + def test_set_remove_new_record(self): + """ Removing something not in the set should silently do nothing + """ + partition = uuid4() + cluster = 1 + TestQueryUpdateModel.objects.create( + partition=partition, cluster=cluster, text_set={"foo"}) + TestQueryUpdateModel.objects( + partition=partition, cluster=cluster).update( + text_set__remove={'afsd'}) + obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) + self.assertEqual(obj.text_set, {"foo"}) From 57f92c0777d1fe3d48141b5b947f897b1e2ddc76 Mon Sep 17 00:00:00 2001 From: Danny Cosson Date: Sun, 9 Feb 2014 15:42:28 -0500 Subject: [PATCH 0618/3726] Adds ability to blind append and prepend to a list --- cqlengine/query.py | 3 +-- cqlengine/statements.py | 13 ++++++++++--- cqlengine/tests/query/test_updates.py | 23 +++++++++++++++++++++++ 3 files changed, 34 insertions(+), 5 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index f226a70ed4..5f32a87cb4 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -681,9 +681,8 @@ def update(self, **values): if isinstance(col, Counter): # TODO: implement counter updates raise NotImplementedError - elif isinstance(col, BaseContainerColumn): + elif isinstance(col, (List, Set)): if isinstance(col, List): klass = ListUpdateClause - elif isinstance(col, Map): klass = MapUpdateClause elif isinstance(col, Set): klass = SetUpdateClause else: raise RuntimeError 
us.add_assignment_clause(klass( diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 111574c167..d53e0cd0fa 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -215,8 +215,8 @@ def update_context(self, ctx): class ListUpdateClause(ContainerUpdateClause): """ updates a list collection """ - def __init__(self, field, value, previous=None, column=None): - super(ListUpdateClause, self).__init__(field, value, previous, column=column) + def __init__(self, field, value, operation=None, previous=None, column=None): + super(ListUpdateClause, self).__init__(field, value, operation, previous, column=column) self._append = None self._prepend = None @@ -261,6 +261,14 @@ def _analyze(self): if self.value is None or self.value == self.previous: pass + elif self._operation == "append": + self._append = self.value + + elif self._operation == "prepend": + # self.value is a Quoter but we reverse self._prepend later as if + # it's a list, so we have to set it to the underlying list + self._prepend = self.value.value + elif self.previous is None: self._assignments = self.value @@ -269,7 +277,6 @@ def _analyze(self): # rewrite the whole list self._assignments = self.value - elif len(self.previous) == 0: # if we're updating from an empty # list, do a complete insert diff --git a/cqlengine/tests/query/test_updates.py b/cqlengine/tests/query/test_updates.py index 65f8f57211..1f62e8ce5d 100644 --- a/cqlengine/tests/query/test_updates.py +++ b/cqlengine/tests/query/test_updates.py @@ -14,6 +14,7 @@ class TestQueryUpdateModel(Model): count = columns.Integer(required=False) text = columns.Text(required=False, index=True) text_set = columns.Set(columns.Text, required=False) + text_list = columns.List(columns.Text, required=False) class QueryUpdateTests(BaseCassEngTestCase): @@ -159,3 +160,25 @@ def test_set_remove_new_record(self): text_set__remove={'afsd'}) obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) 
self.assertEqual(obj.text_set, {"foo"}) + + def test_list_append_updates(self): + partition = uuid4() + cluster = 1 + TestQueryUpdateModel.objects.create( + partition=partition, cluster=cluster, text_list=["foo"]) + TestQueryUpdateModel.objects( + partition=partition, cluster=cluster).update( + text_list__append=['bar']) + obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) + self.assertEqual(obj.text_list, ["foo", "bar"]) + + def test_list_prepend_updates(self): + partition = uuid4() + cluster = 1 + TestQueryUpdateModel.objects.create( + partition=partition, cluster=cluster, text_list=["foo"]) + TestQueryUpdateModel.objects( + partition=partition, cluster=cluster).update( + text_list__prepend=['bar']) + obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) + self.assertEqual(obj.text_list, ["bar", "foo"]) From e11f53e370e634dc271a18056cd158204971d561 Mon Sep 17 00:00:00 2001 From: Danny Cosson Date: Mon, 10 Feb 2014 10:15:52 -0500 Subject: [PATCH 0619/3726] Updates the prepend to list test to make sure order is preserved when multiple items are prepended --- cqlengine/tests/query/test_updates.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cqlengine/tests/query/test_updates.py b/cqlengine/tests/query/test_updates.py index 1f62e8ce5d..1541f69968 100644 --- a/cqlengine/tests/query/test_updates.py +++ b/cqlengine/tests/query/test_updates.py @@ -173,12 +173,13 @@ def test_list_append_updates(self): self.assertEqual(obj.text_list, ["foo", "bar"]) def test_list_prepend_updates(self): + """ Prepend two things since order is reversed by default by CQL """ partition = uuid4() cluster = 1 TestQueryUpdateModel.objects.create( partition=partition, cluster=cluster, text_list=["foo"]) TestQueryUpdateModel.objects( partition=partition, cluster=cluster).update( - text_list__prepend=['bar']) + text_list__prepend=['bar', 'baz']) obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) - 
self.assertEqual(obj.text_list, ["bar", "foo"]) + self.assertEqual(obj.text_list, ["bar", "baz", "foo"]) From 9dc55ec54bef7c218ad2d9036228a2555b2b6aa5 Mon Sep 17 00:00:00 2001 From: Danny Cosson Date: Mon, 10 Feb 2014 11:28:03 -0500 Subject: [PATCH 0620/3726] Adds a map __merge update mode, to merge in map with the existing values on the server. --- cqlengine/query.py | 3 ++- cqlengine/statements.py | 12 +++++++----- cqlengine/tests/query/test_updates.py | 14 ++++++++++++++ 3 files changed, 23 insertions(+), 6 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 5f32a87cb4..ce7036554b 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -681,9 +681,10 @@ def update(self, **values): if isinstance(col, Counter): # TODO: implement counter updates raise NotImplementedError - elif isinstance(col, (List, Set)): + elif isinstance(col, (List, Set, Map)): if isinstance(col, List): klass = ListUpdateClause elif isinstance(col, Set): klass = SetUpdateClause + elif isinstance(col, Map): klass = MapUpdateClause else: raise RuntimeError us.add_assignment_clause(klass( col_name, col.to_database(val), operation=col_op)) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index d53e0cd0fa..5ae1d213d4 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -310,13 +310,16 @@ def _analyze(self): class MapUpdateClause(ContainerUpdateClause): """ updates a map collection """ - def __init__(self, field, value, previous=None, column=None): - super(MapUpdateClause, self).__init__(field, value, previous, column=column) + def __init__(self, field, value, operation=None, previous=None, column=None): + super(MapUpdateClause, self).__init__(field, value, operation, previous, column=column) self._updates = None self.previous = self.previous or {} def _analyze(self): - self._updates = sorted([k for k, v in self.value.items() if v != self.previous.get(k)]) or None + if self._operation == "merge": + self._updates = self.value.value + else: + 
self._updates = sorted([k for k, v in self.value.items() if v != self.previous.get(k)]) or None self._analyzed = True def get_context_size(self): @@ -326,8 +329,7 @@ def get_context_size(self): def update_context(self, ctx): if not self._analyzed: self._analyze() ctx_id = self.context_id - for key in self._updates or []: - val = self.value.get(key) + for key, val in self._updates.items(): ctx[str(ctx_id)] = self._column.key_col.to_database(key) if self._column else key ctx[str(ctx_id + 1)] = self._column.value_col.to_database(val) if self._column else val ctx_id += 2 diff --git a/cqlengine/tests/query/test_updates.py b/cqlengine/tests/query/test_updates.py index 1541f69968..723a8e87f2 100644 --- a/cqlengine/tests/query/test_updates.py +++ b/cqlengine/tests/query/test_updates.py @@ -15,6 +15,7 @@ class TestQueryUpdateModel(Model): text = columns.Text(required=False, index=True) text_set = columns.Set(columns.Text, required=False) text_list = columns.List(columns.Text, required=False) + text_map = columns.Map(columns.Text, columns.Text, required=False) class QueryUpdateTests(BaseCassEngTestCase): @@ -183,3 +184,16 @@ def test_list_prepend_updates(self): text_list__prepend=['bar', 'baz']) obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_list, ["bar", "baz", "foo"]) + + def test_map_merge_updates(self): + """ Merge a dictionary into existing value """ + partition = uuid4() + cluster = 1 + TestQueryUpdateModel.objects.create( + partition=partition, cluster=cluster, + text_map={"foo": '1', "bar": '2'}) + TestQueryUpdateModel.objects( + partition=partition, cluster=cluster).update( + text_map__merge={"bar": '3', "baz": '4'}) + obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) + self.assertEqual(obj.text_map, {"foo": '1', "bar": '3', "baz": '4'}) From c344f1e2742dc49e7d2ea17f4ec5a0632daabef2 Mon Sep 17 00:00:00 2001 From: Danny Cosson Date: Mon, 10 Feb 2014 11:54:31 -0500 Subject: [PATCH 
0621/3726] Tries to add a test that doesn't work because of a bug in the underlying cql library. So doing an update with a dict __merge with a None value will not be possible for the time being. --- cqlengine/tests/query/test_updates.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/cqlengine/tests/query/test_updates.py b/cqlengine/tests/query/test_updates.py index 723a8e87f2..592eb28595 100644 --- a/cqlengine/tests/query/test_updates.py +++ b/cqlengine/tests/query/test_updates.py @@ -197,3 +197,22 @@ def test_map_merge_updates(self): text_map__merge={"bar": '3', "baz": '4'}) obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_map, {"foo": '1', "bar": '3', "baz": '4'}) + + def test_map_merge_none_deletes_key(self): + """ The CQL behavior is if you set a key in a map to null it deletes + that key from the map. Test that this works with __merge. + + This test fails because of a bug in the cql python library not + converting None to null (and the cql library is no longer in active + developement). + """ + # partition = uuid4() + # cluster = 1 + # TestQueryUpdateModel.objects.create( + # partition=partition, cluster=cluster, + # text_map={"foo": '1', "bar": '2'}) + # TestQueryUpdateModel.objects( + # partition=partition, cluster=cluster).update( + # text_map__merge={"bar": None}) + # obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) + # self.assertEqual(obj.text_map, {"foo": '1'}) From a2c7a547ee2a8e0401862f190fafeb3ab9c2d4d4 Mon Sep 17 00:00:00 2001 From: Niklas Korz Date: Tue, 11 Feb 2014 18:53:09 +0100 Subject: [PATCH 0622/3726] QueryException Token values len I assume these two have to be swapped, otherwise it doesn't make much sense. 
--- cqlengine/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index f991f5e9cb..3c629ebf23 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -385,7 +385,7 @@ def filter(self, *args, **kwargs): if len(partition_columns) != len(val.value): raise QueryException( 'Token() received {} arguments but model has {} partition keys'.format( - len(partition_columns), len(val.value))) + len(val.value), len(partition_columns))) val.set_columns(partition_columns) #get query operator, or use equals if not supplied From a32b92165863db3af83240c23fa71811b058558a Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 11 Feb 2014 11:19:03 -0800 Subject: [PATCH 0623/3726] removing uneccesary try/except block --- cqlengine/connection.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 2eb2d30ef9..e03a35b136 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -146,10 +146,7 @@ def get(self): # is immediately available, else raises the Empty exception return self._queue.get(block=False) except queue.Empty: - try: - return self._create_connection() - except CQLConnectionError as cqle: - raise cqle + return self._create_connection() def put(self, conn): """ From 7fa6a6ed9b3dad79e4e7393322b39c1c46b5ff18 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 11 Feb 2014 11:22:42 -0800 Subject: [PATCH 0624/3726] updating changelog --- changelog | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/changelog b/changelog index 1284fa6015..af328c69fd 100644 --- a/changelog +++ b/changelog @@ -2,7 +2,8 @@ CHANGELOG 0.11.1 (in progress) -* Normalize and unquote boolean values. +* Normalize and unquote boolean values. 
(Thanks Kevin Deldycke github.com/kdeldycke) +* Fix race condition in connection manager (Thanks Roey Berman github.com/bergundy) 0.11.0 @@ -10,7 +11,7 @@ CHANGELOG - allows for long, timedelta, and datetime * fixed use of USING TIMESTAMP in batches * clear TTL and timestamp off models after persisting to DB -* allows UUID without - (Thanks to Michael Haddad, github.com/mahall) +* allows UUID without dashes - (Thanks to Michael Hall, github.com/mahall) * fixes regarding syncing schema settings (thanks Kai Lautaportti github.com/dokai) 0.10.0 From f5d329d72354f9492b8752631b31a8d0a5aa7ee6 Mon Sep 17 00:00:00 2001 From: Danny Cosson Date: Fri, 21 Feb 2014 17:30:51 -0500 Subject: [PATCH 0625/3726] reverts so we don't need to change the update_context method for map, we just allow the dict column Quoter to have get() and keys() methods --- cqlengine/columns.py | 6 ++++++ cqlengine/statements.py | 5 +++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 4ba74273b1..1fbb9d037a 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -763,6 +763,12 @@ def __str__(self): cq = cql_quote return '{' + ', '.join([cq(k) + ':' + cq(v) for k,v in self.value.items()]) + '}' + def get(self, key): + return self.value.get(key) + + def keys(self): + return self.value.keys() + def __init__(self, key_type, value_type, default=dict, **kwargs): """ :param key_type: a column class indicating the types of the key diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 5ae1d213d4..8d64d7ec6e 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -317,7 +317,7 @@ def __init__(self, field, value, operation=None, previous=None, column=None): def _analyze(self): if self._operation == "merge": - self._updates = self.value.value + self._updates = self.value.keys() else: self._updates = sorted([k for k, v in self.value.items() if v != self.previous.get(k)]) or None self._analyzed = True @@ -329,7 +329,8 
@@ def get_context_size(self): def update_context(self, ctx): if not self._analyzed: self._analyze() ctx_id = self.context_id - for key, val in self._updates.items(): + for key in self._updates or []: + val = self.value.get(key) ctx[str(ctx_id)] = self._column.key_col.to_database(key) if self._column else key ctx[str(ctx_id + 1)] = self._column.value_col.to_database(val) if self._column else val ctx_id += 2 From 76a8abd11cff544af7e6b4372f151c8d20019c00 Mon Sep 17 00:00:00 2001 From: Danny Cosson Date: Fri, 21 Feb 2014 17:59:22 -0500 Subject: [PATCH 0626/3726] Fixes issue because params got re-ordered --- cqlengine/query.py | 3 ++- cqlengine/statements.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index ce7036554b..288ccf07fa 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -794,7 +794,8 @@ def update(self): else: raise RuntimeError # do the stuff - clause = klass(col.db_field_name, val, val_mgr.previous_value, column=col) + clause = klass(col.db_field_name, val, + previous=val_mgr.previous_value, column=col) if clause.get_context_size() > 0: statement.add_assignment_clause(clause) else: diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 8d64d7ec6e..2ee5f97652 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -350,7 +350,7 @@ def __unicode__(self): class CounterUpdateClause(ContainerUpdateClause): def __init__(self, field, value, previous=None, column=None): - super(CounterUpdateClause, self).__init__(field, value, previous, column) + super(CounterUpdateClause, self).__init__(field, value, previous=previous, column=column) self.previous = self.previous or 0 def get_context_size(self): From dfd8b444286c2ca0bd5a5c57bcced5abe10e0395 Mon Sep 17 00:00:00 2001 From: Danny Cosson Date: Fri, 21 Feb 2014 18:08:53 -0500 Subject: [PATCH 0627/3726] updates tests that had broken because of arguments being rearranged --- 
.../tests/statements/test_assignment_clauses.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cqlengine/tests/statements/test_assignment_clauses.py b/cqlengine/tests/statements/test_assignment_clauses.py index f7fbda38fb..64b35f892b 100644 --- a/cqlengine/tests/statements/test_assignment_clauses.py +++ b/cqlengine/tests/statements/test_assignment_clauses.py @@ -16,7 +16,7 @@ def test_insert_tuple(self): class SetUpdateClauseTests(TestCase): def test_update_from_none(self): - c = SetUpdateClause('s', {1, 2}, None) + c = SetUpdateClause('s', {1, 2}, previous=None) c._analyze() c.set_context_id(0) @@ -33,7 +33,7 @@ def test_update_from_none(self): def test_null_update(self): """ tests setting a set to None creates an empty update statement """ - c = SetUpdateClause('s', None, {1, 2}) + c = SetUpdateClause('s', None, previous={1, 2}) c._analyze() c.set_context_id(0) @@ -50,7 +50,7 @@ def test_null_update(self): def test_no_update(self): """ tests an unchanged value creates an empty update statement """ - c = SetUpdateClause('s', {1, 2}, {1, 2}) + c = SetUpdateClause('s', {1, 2}, previous={1, 2}) c._analyze() c.set_context_id(0) @@ -66,7 +66,7 @@ def test_no_update(self): self.assertEqual(ctx, {}) def test_additions(self): - c = SetUpdateClause('s', {1, 2, 3}, {1, 2}) + c = SetUpdateClause('s', {1, 2, 3}, previous={1, 2}) c._analyze() c.set_context_id(0) @@ -82,7 +82,7 @@ def test_additions(self): self.assertEqual(ctx, {'0': {3}}) def test_removals(self): - c = SetUpdateClause('s', {1, 2}, {1, 2, 3}) + c = SetUpdateClause('s', {1, 2}, previous={1, 2, 3}) c._analyze() c.set_context_id(0) @@ -98,7 +98,7 @@ def test_removals(self): self.assertEqual(ctx, {'0': {3}}) def test_additions_and_removals(self): - c = SetUpdateClause('s', {2, 3}, {1, 2}) + c = SetUpdateClause('s', {2, 3}, previous={1, 2}) c._analyze() c.set_context_id(0) From fc405a1c0693419f3e6c307367111399782d0922 Mon Sep 17 00:00:00 2001 From: Caleb Rackliffe Date: Tue, 25 Feb 
2014 17:50:58 -0800 Subject: [PATCH 0628/3726] Fixed the __gte filtering example --- docs/topics/queryset.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index 569fb58103..471ed91260 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -29,7 +29,7 @@ Retrieving all objects .. _retrieving-objects-with-filters: Retrieving objects with filters ----------------------------------------- +------------------------------- Typically, you'll want to query only a subset of the records in your database. That can be accomplished with the QuerySet's ``.filter(\*\*)`` method. @@ -156,7 +156,7 @@ Filtering Operators # or the nicer syntax - Automobile.objects.filter(Automobile.manufacturer == 'Tesla') + q.filter(Automobile.year >= 2010) :attr:`< (__lt) ` @@ -270,7 +270,7 @@ Values Lists Batch Queries -=============== +============= cqlengine now supports batch queries using the BatchQuery class. Batch queries can be started and stopped manually, or within a context manager. To add queries to the batch object, you just need to precede the create/save/delete call with a call to batch, and pass in the batch object. From bb482fbf837b3506c9fd7870c377e535636cfd4b Mon Sep 17 00:00:00 2001 From: Caleb Rackliffe Date: Wed, 26 Feb 2014 10:20:55 -0800 Subject: [PATCH 0629/3726] Fixed a couple little formatting things pointed out by the inspector... --- docs/conf.py | 12 +++++------- docs/topics/connection.rst | 4 ++-- docs/topics/models.rst | 2 +- 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 37422f20a5..1c01444817 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -11,7 +11,7 @@ # All configuration values have a default; values that are commented out # serve to show the default. -import sys, os +import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the @@ -54,7 +54,6 @@ release = version - # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None @@ -186,8 +185,7 @@ # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'cqlengine.tex', u'cqlengine Documentation', - u'Blake Eggleston', 'manual'), + ('index', 'cqlengine.tex', u'cqlengine Documentation', u'Blake Eggleston', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of @@ -230,9 +228,9 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - ('index', 'cqlengine', u'cqlengine Documentation', - u'Blake Eggleston', 'cqlengine', 'One line description of project.', - 'Miscellaneous'), + ('index', 'cqlengine', u'cqlengine Documentation', + u'Blake Eggleston', 'cqlengine', 'One line description of project.', + 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. diff --git a/docs/topics/connection.rst b/docs/topics/connection.rst index 6140584704..53eaeafad6 100644 --- a/docs/topics/connection.rst +++ b/docs/topics/connection.rst @@ -1,6 +1,6 @@ -============== +========== Connection -============== +========== **Users of versions < 0.4, please read this post before upgrading:** `Breaking Changes`_ diff --git a/docs/topics/models.rst b/docs/topics/models.rst index b34d7f5332..d3c002a1a0 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -263,7 +263,7 @@ Extending Model Validation Compaction Options -==================== +================== As of cqlengine 0.7 we've added support for specifying compaction options. cqlengine will only use your compaction options if you have a strategy set. When a table is synced, it will be altered to match the compaction options set on your table. 
This means that if you are changing settings manually they will be changed back on resync. Do not use the compaction settings of cqlengine if you want to manage your compaction settings manually. From dff75adf4422e055a34d1bddda09e66a114f6396 Mon Sep 17 00:00:00 2001 From: Omer Katz Date: Thu, 6 Mar 2014 18:39:39 +0300 Subject: [PATCH 0630/3726] Fixed a typo in the title. --- docs/topics/manage_schemas.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/topics/manage_schemas.rst b/docs/topics/manage_schemas.rst index 413b53c481..7f9c63c1d5 100644 --- a/docs/topics/manage_schemas.rst +++ b/docs/topics/manage_schemas.rst @@ -1,6 +1,6 @@ -=============== -Managing Schmas -=============== +================ +Managing Schemas +================ **Users of versions < 0.4, please read this post before upgrading:** `Breaking Changes`_ From 1ef653da5931b17cbf33fef0d2c9edf221fc2c59 Mon Sep 17 00:00:00 2001 From: Daniel Date: Tue, 11 Mar 2014 14:15:19 +0100 Subject: [PATCH 0631/3726] fix(docs): add `import uuid` for Getting Started For better copy & paste experience --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 15c591cb8b..b99b182834 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,7 @@ pip install cqlengine ```python #first, define a model +import uuid from cqlengine import columns from cqlengine.models import Model From 1c6660199b3f1b5ab6d054151a39cb3cd8ad73a1 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 11 Mar 2014 13:08:53 -0700 Subject: [PATCH 0632/3726] reformatting --- cqlengine/tests/test_timestamp.py | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index a5d9b870cc..261564319f 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -3,7 +3,6 @@ """ from datetime import timedelta, datetime -import unittest from uuid import uuid4 import mock 
import sure @@ -19,12 +18,15 @@ class TestTimestampModel(Model): class BaseTimestampTest(BaseCassEngTestCase): + @classmethod def setUpClass(cls): super(BaseTimestampTest, cls).setUpClass() sync_table(TestTimestampModel) + class BatchTest(BaseTimestampTest): + def test_batch_is_included(self): with mock.patch.object(ConnectionPool, "execute") as m, BatchQuery(timestamp=timedelta(seconds=30)) as b: TestTimestampModel.batch(b).create(count=1) @@ -32,7 +34,6 @@ def test_batch_is_included(self): "USING TIMESTAMP".should.be.within(m.call_args[0][0]) - class CreateWithTimestampTest(BaseTimestampTest): def test_batch(self): @@ -44,8 +45,6 @@ def test_batch(self): query.should.match(r"INSERT.*USING TIMESTAMP") query.should_not.match(r"TIMESTAMP.*INSERT") - - def test_timestamp_not_included_on_normal_create(self): with mock.patch.object(ConnectionPool, "execute") as m: TestTimestampModel.create(count=2) @@ -90,7 +89,9 @@ def test_instance_update_in_batch(self): query = m.call_args[0][0] "USING TIMESTAMP".should.be.within(query) + class DeleteWithTimestampTest(BaseTimestampTest): + def test_non_batch(self): """ we don't expect the model to come back at the end because the deletion timestamp should be in the future @@ -122,10 +123,6 @@ def test_non_batch(self): tmp.update() tmp._timestamp.shouldnt.be.ok - - - - def test_blind_delete(self): """ we don't expect the model to come back at the end because the deletion timestamp should be in the future @@ -178,6 +175,3 @@ def test_delete_in_the_past(self): TestTimestampModel.get(id=uid) - - - From cc93d45f3cfc050742030b24023d63a6e2125c56 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 11 Mar 2014 13:27:12 -0700 Subject: [PATCH 0633/3726] more formatting --- cqlengine/statements.py | 1 - cqlengine/tests/query/test_queryset.py | 84 +++++++++++++------------- 2 files changed, 43 insertions(+), 42 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index f4cd28836a..2a90c5bc43 100644 --- 
a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -483,7 +483,6 @@ def timestamp_normalized(self): return long(((tmp - datetime.fromtimestamp(0)).total_seconds()) * 1000000) - def __unicode__(self): raise NotImplementedError diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index fbe2affdcc..2144dee5ac 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -22,6 +22,7 @@ class TzOffset(tzinfo): """Minimal implementation of a timezone offset to help testing with timezone aware datetimes. """ + def __init__(self, offset): self._offset = timedelta(hours=offset) @@ -34,6 +35,7 @@ def tzname(self, dt): def dst(self, dt): return timedelta(0) + class TestModel(Model): test_id = columns.Integer(primary_key=True) attempt_id = columns.Integer(primary_key=True) @@ -41,6 +43,7 @@ class TestModel(Model): expected_result = columns.Integer() test_result = columns.Integer() + class IndexedTestModel(Model): test_id = columns.Integer(primary_key=True) attempt_id = columns.Integer(index=True) @@ -48,6 +51,7 @@ class IndexedTestModel(Model): expected_result = columns.Integer() test_result = columns.Integer(index=True) + class TestMultiClusteringModel(Model): one = columns.Integer(primary_key=True) two = columns.Integer(primary_key=True) @@ -55,7 +59,6 @@ class TestMultiClusteringModel(Model): class TestQuerySetOperation(BaseCassEngTestCase): - def test_query_filter_parsing(self): """ Tests the queryset filter method parses it's kwargs properly @@ -164,8 +167,8 @@ def test_defining_only_or_defer_on_nonexistant_fields_fails(self): Tests that setting only or defer fields that don't exist raises an exception """ -class BaseQuerySetUsage(BaseCassEngTestCase): +class BaseQuerySetUsage(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(BaseQuerySetUsage, cls).setUpClass() @@ -201,9 +204,12 @@ def setUpClass(cls): IndexedTestModel.objects.create(test_id=7, attempt_id=3, 
description='try8', expected_result=20, test_result=20) IndexedTestModel.objects.create(test_id=8, attempt_id=0, description='try9', expected_result=50, test_result=40) - IndexedTestModel.objects.create(test_id=9, attempt_id=1, description='try10', expected_result=60, test_result=40) - IndexedTestModel.objects.create(test_id=10, attempt_id=2, description='try11', expected_result=70, test_result=45) - IndexedTestModel.objects.create(test_id=11, attempt_id=3, description='try12', expected_result=75, test_result=45) + IndexedTestModel.objects.create(test_id=9, attempt_id=1, description='try10', expected_result=60, + test_result=40) + IndexedTestModel.objects.create(test_id=10, attempt_id=2, description='try11', expected_result=70, + test_result=45) + IndexedTestModel.objects.create(test_id=11, attempt_id=3, description='try12', expected_result=75, + test_result=45) @classmethod def tearDownClass(cls): @@ -212,8 +218,8 @@ def tearDownClass(cls): drop_table(IndexedTestModel) drop_table(TestMultiClusteringModel) -class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage): +class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage): def test_count(self): """ Tests that adding filtering statements affects the count query as expected """ assert TestModel.objects.count() == 12 @@ -232,7 +238,7 @@ def test_iteration(self): """ Tests that iterating over a query set pulls back all of the expected results """ q = TestModel.objects(test_id=0) #tuple of expected attempt_id, expected_result values - compare_set = set([(0,5), (1,10), (2,15), (3,20)]) + compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)]) for t in q: val = t.attempt_id, t.expected_result assert val in compare_set @@ -243,7 +249,7 @@ def test_iteration(self): q = TestModel.objects(attempt_id=3).allow_filtering() assert len(q) == 3 #tuple of expected test_id, expected_result values - compare_set = set([(0,20), (1,20), (2,75)]) + compare_set = set([(0, 20), (1, 20), (2, 75)]) for t in q: val = t.test_id, 
t.expected_result assert val in compare_set @@ -254,7 +260,7 @@ def test_iteration(self): q = TestModel.objects(TestModel.attempt_id == 3).allow_filtering() assert len(q) == 3 #tuple of expected test_id, expected_result values - compare_set = set([(0,20), (1,20), (2,75)]) + compare_set = set([(0, 20), (1, 20), (2, 75)]) for t in q: val = t.test_id, t.expected_result assert val in compare_set @@ -266,7 +272,7 @@ def test_multiple_iterations_work_properly(self): # test with both the filtering method and the query method for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id == 0)): #tuple of expected attempt_id, expected_result values - compare_set = set([(0,5), (1,10), (2,15), (3,20)]) + compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)]) for t in q: val = t.attempt_id, t.expected_result assert val in compare_set @@ -274,7 +280,7 @@ def test_multiple_iterations_work_properly(self): assert len(compare_set) == 0 #try it again - compare_set = set([(0,5), (1,10), (2,15), (3,20)]) + compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)]) for t in q: val = t.attempt_id, t.expected_result assert val in compare_set @@ -287,7 +293,7 @@ def test_multiple_iterators_are_isolated(self): """ for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id == 0)): q = q.order_by('attempt_id') - expected_order = [0,1,2,3] + expected_order = [0, 1, 2, 3] iter1 = iter(q) iter2 = iter(q) for attempt_id in expected_order: @@ -354,18 +360,18 @@ def test_allow_filtering_flag(self): """ """ + class TestQuerySetOrdering(BaseQuerySetUsage): def test_order_by_success_case(self): - q = TestModel.objects(test_id=0).order_by('attempt_id') - expected_order = [0,1,2,3] - for model, expect in zip(q,expected_order): + expected_order = [0, 1, 2, 3] + for model, expect in zip(q, expected_order): assert model.attempt_id == expect q = q.order_by('-attempt_id') expected_order.reverse() - for model, expect in zip(q,expected_order): + for model, expect in zip(q, 
expected_order): assert model.attempt_id == expect def test_ordering_by_non_second_primary_keys_fail(self): @@ -403,7 +409,6 @@ def test_ordering_on_multiple_clustering_columns(self): class TestQuerySetSlicing(BaseQuerySetUsage): - def test_out_of_range_index_raises_error(self): q = TestModel.objects(test_id=0).order_by('attempt_id') with self.assertRaises(IndexError): @@ -411,32 +416,32 @@ def test_out_of_range_index_raises_error(self): def test_array_indexing_works_properly(self): q = TestModel.objects(test_id=0).order_by('attempt_id') - expected_order = [0,1,2,3] + expected_order = [0, 1, 2, 3] for i in range(len(q)): assert q[i].attempt_id == expected_order[i] def test_negative_indexing_works_properly(self): q = TestModel.objects(test_id=0).order_by('attempt_id') - expected_order = [0,1,2,3] + expected_order = [0, 1, 2, 3] assert q[-1].attempt_id == expected_order[-1] assert q[-2].attempt_id == expected_order[-2] def test_slicing_works_properly(self): q = TestModel.objects(test_id=0).order_by('attempt_id') - expected_order = [0,1,2,3] + expected_order = [0, 1, 2, 3] for model, expect in zip(q[1:3], expected_order[1:3]): assert model.attempt_id == expect def test_negative_slicing(self): q = TestModel.objects(test_id=0).order_by('attempt_id') - expected_order = [0,1,2,3] + expected_order = [0, 1, 2, 3] for model, expect in zip(q[-3:], expected_order[-3:]): assert model.attempt_id == expect for model, expect in zip(q[:-1], expected_order[:-1]): assert model.attempt_id == expect -class TestQuerySetValidation(BaseQuerySetUsage): +class TestQuerySetValidation(BaseQuerySetUsage): def test_primary_key_or_index_must_be_specified(self): """ Tests that queries that don't have an equals relation to a primary key or indexed field fail @@ -453,7 +458,6 @@ def test_primary_key_or_index_must_have_equal_relation_filter(self): q = TestModel.objects(test_id__gt=0) list([i for i in q]) - def test_indexed_field_can_be_queried(self): """ Tests that queries on an indexed field will 
work without any primary key relations specified @@ -461,8 +465,8 @@ def test_indexed_field_can_be_queried(self): q = IndexedTestModel.objects(test_result=25) assert q.count() == 4 -class TestQuerySetDelete(BaseQuerySetUsage): +class TestQuerySetDelete(BaseQuerySetUsage): def test_delete(self): TestModel.objects.create(test_id=3, attempt_id=0, description='try9', expected_result=50, test_result=40) TestModel.objects.create(test_id=3, attempt_id=1, description='try10', expected_result=60, test_result=40) @@ -487,15 +491,15 @@ def test_delete_without_any_where_args(self): with self.assertRaises(query.QueryException): TestModel.objects(attempt_id=0).delete() -class TestQuerySetConnectionHandling(BaseQuerySetUsage): +class TestQuerySetConnectionHandling(BaseQuerySetUsage): def test_conn_is_returned_after_filling_cache(self): """ Tests that the queryset returns it's connection after it's fetched all of it's results """ q = TestModel.objects(test_id=0) #tuple of expected attempt_id, expected_result values - compare_set = set([(0,5), (1,10), (2,15), (3,20)]) + compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)]) for t in q: val = t.attempt_id, t.expected_result assert val in compare_set @@ -506,13 +510,12 @@ def test_conn_is_returned_after_filling_cache(self): class TimeUUIDQueryModel(Model): - partition = columns.UUID(primary_key=True) - time = columns.TimeUUID(primary_key=True) - data = columns.Text(required=False) + partition = columns.UUID(primary_key=True) + time = columns.TimeUUID(primary_key=True) + data = columns.Text(required=False) class TestMinMaxTimeUUIDFunctions(BaseCassEngTestCase): - @classmethod def setUpClass(cls): super(TestMinMaxTimeUUIDFunctions, cls).setUpClass() @@ -585,14 +588,14 @@ def test_success_case(self): q = [d for d in q] assert len(q) == 2 datas = [d.data for d in q] - assert '1' in datas - assert '2' in datas + assert '1' in datas + assert '2' in datas q = TimeUUIDQueryModel.filter(partition=pk, time__gte=functions.MinTimeUUID(midpoint)) 
assert len(q) == 2 datas = [d.data for d in q] - assert '3' in datas - assert '4' in datas + assert '3' in datas + assert '4' in datas # test query expression filtering q = TimeUUIDQueryModel.filter( @@ -602,8 +605,8 @@ def test_success_case(self): q = [d for d in q] assert len(q) == 2 datas = [d.data for d in q] - assert '1' in datas - assert '2' in datas + assert '1' in datas + assert '2' in datas q = TimeUUIDQueryModel.filter( TimeUUIDQueryModel.partition == pk, @@ -611,15 +614,14 @@ def test_success_case(self): ) assert len(q) == 2 datas = [d.data for d in q] - assert '3' in datas - assert '4' in datas + assert '3' in datas + assert '4' in datas class TestInOperator(BaseQuerySetUsage): - def test_kwarg_success_case(self): """ Tests the in operator works with the kwarg query method """ - q = TestModel.filter(test_id__in=[0,1]) + q = TestModel.filter(test_id__in=[0, 1]) assert q.count() == 8 def test_query_expression_success_case(self): @@ -637,8 +639,8 @@ def test_values_list(self): item = q.values_list('expected_result', flat=True).first() assert item == 10 -class TestObjectsProperty(BaseQuerySetUsage): +class TestObjectsProperty(BaseQuerySetUsage): def test_objects_property_returns_fresh_queryset(self): assert TestModel.objects._result_cache is None len(TestModel.objects) # evaluate queryset From b924eec0be5647783f6d4145cdac53627ce19f8c Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 11 Mar 2014 13:43:16 -0700 Subject: [PATCH 0634/3726] adding setitem method to base model --- cqlengine/models.py | 8 ++++++++ cqlengine/tests/model/test_model_io.py | 3 +++ 2 files changed, 11 insertions(+) diff --git a/cqlengine/models.py b/cqlengine/models.py index 5655c9df54..f771b94f04 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -436,6 +436,14 @@ def __getitem__(self, key): raise KeyError return getattr(self, key) + def __setitem__(self, key, val): + """ Sets a column's value. 
""" + if not isinstance(key, basestring): + raise TypeError + if key not in self._columns.keys(): + raise KeyError + return setattr(self, key, val) + def __len__(self): """ Returns the number of columns defined on that model. """ return len(self._columns.keys()) diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index 512aa2324f..e20e588571 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -64,6 +64,9 @@ def test_model_read_as_dict(self): for column_id in column_dict.keys(): self.assertEqual(tm[column_id], column_dict[column_id]) + tm['count'] = 6 + self.assertEqual(tm.count, 6) + def test_model_updating_works_properly(self): """ Tests that subsequent saves after initial model creation work From a8d86c6cfc0d84d979b747daeed4f8761f0d520d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 11 Mar 2014 13:48:08 -0700 Subject: [PATCH 0635/3726] updating the changelog --- changelog | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/changelog b/changelog index aea10c7b79..aa0bdb5590 100644 --- a/changelog +++ b/changelog @@ -1,9 +1,10 @@ CHANGELOG -0.11.1 (in progress) +0.12.0 (in progress) * Normalize and unquote boolean values. 
(Thanks Kevin Deldycke github.com/kdeldycke) * Fix race condition in connection manager (Thanks Roey Berman github.com/bergundy) +* allow access to instance columns as if it is a dict (Thanks Kevin Deldycke github.com/kdeldycke) 0.11.0 @@ -13,7 +14,6 @@ CHANGELOG * clear TTL and timestamp off models after persisting to DB * allows UUID without dashes - (Thanks to Michael Hall, github.com/mahall) * fixes regarding syncing schema settings (thanks Kai Lautaportti github.com/dokai) -* allow acces to instance columns as if it is a dict 0.10.0 From 17d24bbab866342104eb8ea4f253940b0be4b121 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Tue, 11 Mar 2014 13:49:37 -0700 Subject: [PATCH 0636/3726] adding documentation around model dict behavior --- docs/topics/models.rst | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index d3c002a1a0..ee044f0e11 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -312,3 +312,24 @@ Compaction Options __compaction_tombstone_compaction_interval__ = 86400 Tables may use `LeveledCompactionStrategy` or `SizeTieredCompactionStrategy`. Both options are available in the top level cqlengine module. To reiterate, you will need to set your `__compaction__` option explicitly in order for cqlengine to handle any of your settings. + +Manipulating model instances as dictionaries +============================================ + + As of cqlengine 0.12, we've added support for treating model instances like dictionaries. See below for examples + + .. 
code-block:: python + + class Person(Model): + first_name = columns.Text() + last_name = columns.Text() + + kevin = Person.create(first_name="Kevin", last_name="Deldycke") + dict(kevin) # returns {'first_name': 'Kevin', 'last_name': 'Deldycke'} + kevin['first_name'] # returns 'Kevin' + kevin.keys() # returns ['first_name', 'last_name'] + kevin.values() # returns ['Kevin', 'Deldycke'] + kevin.items() # returns [('first_name', 'Kevin'), ('last_name', 'Deldycke')] + + kevin['first_name'] = 'KEVIN5000' # changes the models first name + From 877427ee179aab0846f52c4e9e4cae6781f0000f Mon Sep 17 00:00:00 2001 From: Danny Cosson Date: Wed, 12 Mar 2014 10:27:02 -0400 Subject: [PATCH 0637/3726] renames the map "merge" operation to "update" to be consistent with the python dict method --- cqlengine/statements.py | 2 +- cqlengine/tests/query/test_updates.py | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 92def0eb13..f2869c14c5 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -335,7 +335,7 @@ def __init__(self, field, value, operation=None, previous=None, column=None): self.previous = self.previous or {} def _analyze(self): - if self._operation == "merge": + if self._operation == "update": self._updates = self.value.keys() else: self._updates = sorted([k for k, v in self.value.items() if v != self.previous.get(k)]) or None diff --git a/cqlengine/tests/query/test_updates.py b/cqlengine/tests/query/test_updates.py index 592eb28595..d32755af8f 100644 --- a/cqlengine/tests/query/test_updates.py +++ b/cqlengine/tests/query/test_updates.py @@ -185,7 +185,7 @@ def test_list_prepend_updates(self): obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_list, ["bar", "baz", "foo"]) - def test_map_merge_updates(self): + def test_map_update_updates(self): """ Merge a dictionary into existing value """ partition = uuid4() cluster = 1 @@ 
-194,13 +194,13 @@ def test_map_merge_updates(self): text_map={"foo": '1', "bar": '2'}) TestQueryUpdateModel.objects( partition=partition, cluster=cluster).update( - text_map__merge={"bar": '3', "baz": '4'}) + text_map__update={"bar": '3', "baz": '4'}) obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) self.assertEqual(obj.text_map, {"foo": '1', "bar": '3', "baz": '4'}) - def test_map_merge_none_deletes_key(self): + def test_map_update_none_deletes_key(self): """ The CQL behavior is if you set a key in a map to null it deletes - that key from the map. Test that this works with __merge. + that key from the map. Test that this works with __update. This test fails because of a bug in the cql python library not converting None to null (and the cql library is no longer in active @@ -213,6 +213,6 @@ def test_map_merge_none_deletes_key(self): # text_map={"foo": '1', "bar": '2'}) # TestQueryUpdateModel.objects( # partition=partition, cluster=cluster).update( - # text_map__merge={"bar": None}) + # text_map__update={"bar": None}) # obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) # self.assertEqual(obj.text_map, {"foo": '1'}) From 83c1f151ef935e4dd0f39b0b986ca115c2e5c7a2 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 12 Mar 2014 10:50:55 -0700 Subject: [PATCH 0638/3726] updating dict docs --- docs/topics/models.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index ee044f0e11..7de1a5e40e 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -316,7 +316,7 @@ Compaction Options Manipulating model instances as dictionaries ============================================ - As of cqlengine 0.12, we've added support for treating model instances like dictionaries. See below for examples + As of cqlengine 0.12, we've added support for treating model instances like dictionaries. See below for examples. .. 
code-block:: python From fc140fcfc53ab3291e62c495949506614da55dd7 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 12 Mar 2014 10:57:24 -0700 Subject: [PATCH 0639/3726] making the container update class selection logic more readable --- cqlengine/query.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 84fe695437..eb82479b42 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -682,12 +682,15 @@ def update(self, **values): # TODO: implement counter updates raise NotImplementedError elif isinstance(col, (List, Set, Map)): - if isinstance(col, List): klass = ListUpdateClause - elif isinstance(col, Set): klass = SetUpdateClause - elif isinstance(col, Map): klass = MapUpdateClause - else: raise RuntimeError - us.add_assignment_clause(klass( - col_name, col.to_database(val), operation=col_op)) + if isinstance(col, List): + klass = ListUpdateClause + elif isinstance(col, Set): + klass = SetUpdateClause + elif isinstance(col, Map): + klass = MapUpdateClause + else: + raise RuntimeError + us.add_assignment_clause(klass(col_name, col.to_database(val), operation=col_op)) else: us.add_assignment_clause(AssignmentClause( col_name, col.to_database(val))) From 71eff2b6a4e4d7dffce4038e286cffed6595f688 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 12 Mar 2014 16:28:39 -0700 Subject: [PATCH 0640/3726] adding documentation around blind container updates --- docs/topics/queryset.rst | 54 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index 471ed91260..3323604089 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -400,3 +400,57 @@ QuerySet method reference Per column validation will be performed, but instance level validation will not (`Model.validate` is not called). 
+ The queryset update method also supports blindly adding and removing elements from container columns, without + loading a model instance from Cassandra. + + Using the syntax `.update(column_name={x, y, z})` will overwrite the contents of the container, like updating a + non container column. However, adding `__` to the end of the keyword arg, makes the update call add + or remove items from the collection, without overwriting then entire column. + + + Given the model below, here are the operations that can be performed on the different container columns: + + .. code-block:: python + + class Row(Model): + row_id = columns.Integer(primary_key=True) + set_column = columns.Set(Integer) + list_column = columns.Set(Integer) + map_column = columns.Set(Integer, Integer) + + :class:`~cqlengine.columns.Set` + + - `add`: adds the elements of the given set to the column + - `remove`: removes the elements of the given set to the column + + + .. code-block:: python + + # add elements to a set + Row.objects(row_id=5).update(set_column__add={6}) + + # remove elements to a set + Row.objects(row_id=5).update(set_column__remove={4}) + + :class:`~cqlengine.columns.List` + + - `append`: appends the elements of the given list to the end of the column + - `prepend`: prepends the elements of the given list to the beginning of the column + + .. code-block:: python + + # append items to a list + Row.objects(row_id=5).update(list_column__append=[6, 7]) + + # prepend items to a list + Row.objects(row_id=5).update(list_column__prepend=[1, 2]) + + + :class:`~cqlengine.columns.Map` + + - `update`: adds the given keys/values to the columns, creating new entries if they didn't exist, and overwriting old ones if they did + + .. 
code-block:: python + + # add items to a map + Row.objects(row_id=5).update(map_column__update={1: 2, 3: 4}) From 3b05616fc404676f7b569ce337ee58f1782a89b6 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 12 Mar 2014 16:30:47 -0700 Subject: [PATCH 0641/3726] updating changelog --- changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog b/changelog index aa0bdb5590..52415504a3 100644 --- a/changelog +++ b/changelog @@ -5,6 +5,7 @@ CHANGELOG * Normalize and unquote boolean values. (Thanks Kevin Deldycke github.com/kdeldycke) * Fix race condition in connection manager (Thanks Roey Berman github.com/bergundy) * allow access to instance columns as if it is a dict (Thanks Kevin Deldycke github.com/kdeldycke) +* Added support for blind partial updates to queryset (Thanks Danny Cosson github.com/dcosson) 0.11.0 From cf101be618bcbbfbd93ef3af012191f46717f74c Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 12 Mar 2014 16:35:56 -0700 Subject: [PATCH 0642/3726] doing version bump --- changelog | 2 +- cqlengine/VERSION | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/changelog b/changelog index 52415504a3..de52d15e79 100644 --- a/changelog +++ b/changelog @@ -1,6 +1,6 @@ CHANGELOG -0.12.0 (in progress) +0.12.0 * Normalize and unquote boolean values. (Thanks Kevin Deldycke github.com/kdeldycke) * Fix race condition in connection manager (Thanks Roey Berman github.com/bergundy) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index d9df1bbc0c..ac454c6a1f 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.11.0 +0.12.0 From 270c8ca68357f92999474fbf110fed7b01cdfdf2 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Thu, 13 Mar 2014 10:31:00 +0100 Subject: [PATCH 0643/3726] Use proper way to access package resources. 
--- cqlengine/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 51b3fc2f22..91828b6f29 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,11 +1,14 @@ import os +import pkg_resources from cqlengine.columns import * from cqlengine.functions import * from cqlengine.models import Model from cqlengine.query import BatchQuery -__cqlengine_version_path__ = os.path.realpath(__file__ + '/../VERSION') + +__cqlengine_version_path__ = pkg_resources.resource_filename('cqlengine', + 'VERSION') __version__ = open(__cqlengine_version_path__, 'r').readline().strip() # compaction From 23b8b12fa5ecb0797849e65d2631577ef4d88cd1 Mon Sep 17 00:00:00 2001 From: Mike Hall Date: Thu, 13 Mar 2014 09:52:55 -0700 Subject: [PATCH 0644/3726] compare against other instead of self in __eq__ --- cqlengine/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index f771b94f04..6f6662863e 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -376,7 +376,7 @@ def __eq__(self, other): # check attribute keys keys = set(self._columns.keys()) - other_keys = set(self._columns.keys()) + other_keys = set(other._columns.keys()) if keys != other_keys: return False From 9b05f970e3ed293b63a279d59bf7400fd5dd0152 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Fri, 14 Mar 2014 14:29:42 -0700 Subject: [PATCH 0645/3726] removing note about indices on models with multiple primary keys --- docs/topics/models.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 7de1a5e40e..f6695d749b 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -84,8 +84,6 @@ Column Options :attr:`~cqlengine.columns.BaseColumn.index` If True, an index will be created for this column. Defaults to False. 
- *Note: Indexes can only be created on models with one primary key* - :attr:`~cqlengine.columns.BaseColumn.db_field` Explicitly sets the name of the column in the database table. If this is left blank, the column name will be the same as the name of the column attribute. Defaults to None. From cc503f1bf0857a0c1834d70cf03133bf8aae8679 Mon Sep 17 00:00:00 2001 From: Nati CT Date: Sun, 16 Mar 2014 16:51:25 +0200 Subject: [PATCH 0646/3726] fixed camelCase naming for container column names --- cqlengine/columns.py | 4 ++-- .../tests/columns/test_container_columns.py | 20 +++++++++++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 2e203e44d6..7888981a97 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -537,7 +537,7 @@ def get_column_def(self): Returns a column definition for CQL table definition """ db_type = self.db_type.format(self.value_type.db_type) - return '{} {}'.format(self.db_field_name, db_type) + return '{} {}'.format(self.cql, db_type) def get_update_statement(self, val, prev, ctx): """ @@ -813,7 +813,7 @@ def get_column_def(self): self.key_type.db_type, self.value_type.db_type ) - return '{} {}'.format(self.db_field_name, db_type) + return '{} {}'.format(self.cql, db_type) def validate(self, value): val = super(Map, self).validate(value) diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index d54a339f06..2f803913f6 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -522,3 +522,23 @@ def test_default_empty_container_saving(self): # assert len([v for v in ctx.values() if [7,8,9] == v.value]) == 1 # assert len([s for s in statements if '"TEST" = "TEST" +' in s]) == 1 # assert len([s for s in statements if '+ "TEST"' in s]) == 1 + +class TestCamelMapModel(Model): + partition = columns.UUID(primary_key=True, default=uuid4) + camelMap = 
columns.Map(columns.Text, columns.Integer, required=False) + +class TestCamelMapColumn(BaseCassEngTestCase): + @classmethod + def setUpClass(cls): + super(TestCamelMapColumn, cls).setUpClass() + drop_table(TestCamelMapModel) + sync_table(TestCamelMapModel) + + @classmethod + def tearDownClass(cls): + super(TestCamelMapColumn, cls).tearDownClass() + drop_table(TestCamelMapModel) + + def test_camelcase_column(self): + TestCamelMapModel.create(partition=None, camelMap={'blah': 1}) + From 109c019836c229ee317e4867def49ec2fbe9dde2 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 20 Mar 2014 16:32:14 -0700 Subject: [PATCH 0647/3726] adding tests around equality --- cqlengine/tests/model/test_model.py | 33 +++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 cqlengine/tests/model/test_model.py diff --git a/cqlengine/tests/model/test_model.py b/cqlengine/tests/model/test_model.py new file mode 100644 index 0000000000..a0a6929a27 --- /dev/null +++ b/cqlengine/tests/model/test_model.py @@ -0,0 +1,33 @@ +from unittest import TestCase + +from cqlengine.models import Model +from cqlengine import columns + + +class TestModel(TestCase): + """ Tests the non-io functionality of models """ + + def test_instance_equality(self): + """ tests the model equality functionality """ + class EqualityModel(Model): + pk = columns.Integer(primary_key=True) + + m0 = EqualityModel(pk=0) + m1 = EqualityModel(pk=1) + + self.assertEqual(m0, m0) + self.assertNotEqual(m0, m1) + + def test_model_equality(self): + """ tests the model equality functionality """ + class EqualityModel0(Model): + pk = columns.Integer(primary_key=True) + + class EqualityModel1(Model): + kk = columns.Integer(primary_key=True) + + m0 = EqualityModel0(pk=0) + m1 = EqualityModel1(kk=1) + + self.assertEqual(m0, m0) + self.assertNotEqual(m0, m1) From 31d3ae10595ba2e4766f517b8d40dc48c2d53636 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 20 Mar 2014 16:38:24 -0700 Subject: [PATCH 
0648/3726] updating changelog --- changelog | 2 ++ 1 file changed, 2 insertions(+) diff --git a/changelog b/changelog index de52d15e79..515195fbab 100644 --- a/changelog +++ b/changelog @@ -6,6 +6,8 @@ CHANGELOG * Fix race condition in connection manager (Thanks Roey Berman github.com/bergundy) * allow access to instance columns as if it is a dict (Thanks Kevin Deldycke github.com/kdeldycke) * Added support for blind partial updates to queryset (Thanks Danny Cosson github.com/dcosson) +* Model instance equality check bugfix (Thanks to Michael Hall, github.com/mahall) +* Fixed bug syncing tables with camel cased names (Thanks to Netanel Cohen-Tzemach, github.com/natict) 0.11.0 From 48b33772d6b942c8323d56a51c0ce937974c6f34 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 20 Mar 2014 16:46:53 -0700 Subject: [PATCH 0649/3726] removing unused import --- cqlengine/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index 91828b6f29..cd9f8f3557 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,4 +1,3 @@ -import os import pkg_resources from cqlengine.columns import * From 2fb3d5abe7897a946e1338ea3d83cbb79b5494f1 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 20 Mar 2014 16:48:04 -0700 Subject: [PATCH 0650/3726] updating changelog --- changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog b/changelog index 515195fbab..eb99e95382 100644 --- a/changelog +++ b/changelog @@ -8,6 +8,7 @@ CHANGELOG * Added support for blind partial updates to queryset (Thanks Danny Cosson github.com/dcosson) * Model instance equality check bugfix (Thanks to Michael Hall, github.com/mahall) * Fixed bug syncing tables with camel cased names (Thanks to Netanel Cohen-Tzemach, github.com/natict) +* Fixed bug dealing with eggs (Thanks Kevin Deldycke github.com/kdeldycke) 0.11.0 From 65f2ef9216609d7b4fd818c5d5fba1b01b7b24e8 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 20 Mar 2014 
17:32:08 -0700 Subject: [PATCH 0651/3726] updating authors list --- AUTHORS | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/AUTHORS b/AUTHORS index 70cdaeaf80..2da11977a5 100644 --- a/AUTHORS +++ b/AUTHORS @@ -3,8 +3,14 @@ PRIMARY AUTHORS Blake Eggleston Jon Haddad -CONTRIBUTORS +CONTRIBUTORS (let us know if we missed you) Eric Scrivner - test environment, connection pooling Kevin Deldycke - +Roey Berman +Danny Cosson +Michael Hall +Netanel Cohen-Tzemach +Mariusz Kryński +Greg Doermann +@pandu-rao \ No newline at end of file From ae43b6f2a9a46e88dcc853542fd5a6323cae7ccb Mon Sep 17 00:00:00 2001 From: Daniel Dotsenko Date: Wed, 9 Apr 2014 13:27:08 -0700 Subject: [PATCH 0652/3726] adding callbacks support to BatchQuery --- cqlengine/query.py | 45 ++++++++++- cqlengine/tests/test_batch_query.py | 113 +++++++++++++++++++++++++--- docs/topics/queryset.rst | 93 +++++++++++++++++++++++ 3 files changed, 239 insertions(+), 12 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index eb82479b42..8586ca572e 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -78,8 +78,35 @@ class BatchQuery(object): """ _consistency = None + @property + def callbacks(self): + """ + Returns the set object that contains all of the callbacks presently attached to this batch. - def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on_exception=False): + :rtype: set + """ + return self._callbacks + + def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on_exception=False, callbacks=None): + """ + :param batch_type: (optional) One of batch type values available through BatchType enum + :type batch_type: str or None + :param timestamp: (optional) A datetime or timedelta object with desired timestamp to be applied + to the batch transaction. 
+ :type timestamp: datetime or timedelta or None + :param consistency: (optional) One of consistency values ("ANY", "ONE", "QUORUM" etc) + :type consistency: str or None + :param execute_on_exception: (Defaults to False) Indicates that when the BatchQuery instance is used + as a context manager the queries accumulated within the context must be executed despite + encountering an error within the context. By default, any exception raised from within + the context scope will cause the batched queries not to be executed. + :type execute_on_exception: bool + :param callbacks: A list of functions to be executed after the batch executes. Note, that if the batch + does not execute, the callbacks are not executed. This, thus, effectively is a list of "on success" + callback handlers. If defined, must be a collection of callables. + At this time only one argument is passed into the callback - the batch instance. + :type callbacks: list or set or tuple + """ self.queries = [] self.batch_type = batch_type if timestamp is not None and not isinstance(timestamp, (datetime, timedelta)): @@ -87,6 +114,7 @@ def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on self.timestamp = timestamp self._consistency = consistency self._execute_on_exception = execute_on_exception + self._callbacks = set(callbacks or []) def add_query(self, query): if not isinstance(query, BaseCQLStatement): @@ -96,9 +124,23 @@ def add_query(self, query): def consistency(self, consistency): self._consistency = consistency + def _execute_callbacks(self): + for callback in self._callbacks: + if callable(callback): + stored_args = [] + else: + stored_args = callback[1:] + callback = callback[0] + callback(self, *stored_args) + + # trying to clear up the ref counts for objects mentioned in the set + del self._callbacks + def execute(self): if len(self.queries) == 0: # Empty batch is a no-op + # except for callbacks + self._execute_callbacks() return opener = 'BEGIN ' + (self.batch_type + ' 
' if self.batch_type else '') + ' BATCH' @@ -130,6 +172,7 @@ def execute(self): execute('\n'.join(query_list), parameters, self._consistency) self.queries = [] + self._execute_callbacks() def __enter__(self): return self diff --git a/cqlengine/tests/test_batch_query.py b/cqlengine/tests/test_batch_query.py index 4beb33d9da..da7c944f97 100644 --- a/cqlengine/tests/test_batch_query.py +++ b/cqlengine/tests/test_batch_query.py @@ -17,6 +17,7 @@ class TestMultiKeyModel(Model): count = columns.Integer(required=False) text = columns.Text(required=False) + class BatchQueryTests(BaseCassEngTestCase): @classmethod @@ -41,24 +42,13 @@ def test_insert_success_case(self): b = BatchQuery() inst = TestMultiKeyModel.batch(b).create(partition=self.pkey, cluster=2, count=3, text='4') - with self.assertRaises(TestMultiKeyModel.DoesNotExist): TestMultiKeyModel.get(partition=self.pkey, cluster=2) b.execute() - TestMultiKeyModel.get(partition=self.pkey, cluster=2) - def test_batch_is_executed(self): - b = BatchQuery() - inst = TestMultiKeyModel.batch(b).create(partition=self.pkey, cluster=2, count=3, text='4') - - with self.assertRaises(TestMultiKeyModel.DoesNotExist): - TestMultiKeyModel.get(partition=self.pkey, cluster=2) - - b.execute() - def test_update_success_case(self): inst = TestMultiKeyModel.create(partition=self.pkey, cluster=2, count=3, text='4') @@ -125,3 +115,104 @@ def test_empty_batch(self): with BatchQuery() as b: pass + +class BatchQueryCallbacksTests(BaseCassEngTestCase): + + def test_API_managing_callbacks(self): + + # Callbacks can be added at init and after + + def my_callback(batch, *args, **kwargs): + pass + + # adding on init: + b = BatchQuery(callbacks=[ + my_callback, + my_callback, # will be filtered out automatically + (my_callback, 'more', 'args'), + (my_callback, 'more', 'args'), # will be filtered out too + ]) + assert b.callbacks == { + my_callback, + (my_callback, 'more', 'args') + } + + # adding using set API post-init: + + 
b.callbacks.add(my_callback) + b.callbacks.add((my_callback, 'more', 'args')) + b.callbacks.add((my_callback, 'yet more', 'args')) + assert b.callbacks == { + my_callback, + (my_callback, 'more', 'args'), + (my_callback, 'yet more', 'args') + } + + b.callbacks.remove(my_callback) + assert b.callbacks == { + (my_callback, 'more', 'args'), + (my_callback, 'yet more', 'args') + } + + b.callbacks.remove((my_callback, 'more', 'args')) + assert b.callbacks == { + (my_callback, 'yet more', 'args') + } + + + # insure that we disallow setting the callback colleciton on instance: + # (thus forcing use of init and set-like api tested above) + with self.assertRaises(AttributeError): + b.callbacks = set() + + def test_callbacks_properly_execute_callables_and_tuples(self): + + call_history = [] + def my_callback(*args, **kwargs): + call_history.append(args) + + # adding on init: + b = BatchQuery(callbacks=[ + my_callback, + (my_callback, 'more', 'args') + ]) + + b.execute() + + assert len(call_history) == 2 + assert {(b,), (b, 'more', 'args')} == set(call_history) + + def test_callbacks_tied_to_execute(self): + """Batch callbacks should NOT fire if batch is not executed in context manager mode""" + + call_history = [] + def my_callback(*args, **kwargs): + call_history.append(args) + + with BatchQuery(callbacks=[my_callback]) as b: + pass + + assert len(call_history) == 1 + + class SomeError(Exception): + pass + + with self.assertRaises(SomeError): + with BatchQuery(callbacks=[my_callback]) as b: + # this error bubbling up through context manager + # should prevent callback runs (along with b.execute()) + raise SomeError + + # still same call history + assert len(call_history) == 1 + + # but if execute run, even with an error bubbling through + # the callbacks also whoudl have fired + with self.assertRaises(SomeError): + with BatchQuery(execute_on_exception=True, callbacks=[my_callback]) as b: + # this error bubbling up through context manager + # should prevent callback runs 
(along with b.execute()) + raise SomeError + + # still same call history + assert len(call_history) == 2 diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index 3323604089..586344469e 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -274,6 +274,9 @@ Batch Queries cqlengine now supports batch queries using the BatchQuery class. Batch queries can be started and stopped manually, or within a context manager. To add queries to the batch object, you just need to precede the create/save/delete call with a call to batch, and pass in the batch object. +Batch Query General Use Pattern +------------------------------- + You can only create, update, and delete rows with a batch query, attempting to read rows out of the database with a batch query will fail. .. code-block:: python @@ -324,6 +327,96 @@ Batch Queries If an exception is thrown somewhere in the block, any statements that have been added to the batch will still be executed. This is useful for some logging situations. +Batch Query Execution Callbacks +------------------------------- + + In order to allow secondary tasks to be chained to the end of batch, BatchQuery instances allow callbacks to be + registered with the batch, to be executed immediately after the batch executes. + + Let's say an iterative action over a group of records needs to be followed up by a "maintenance" task like "Update + count of children on parent model." + If your were to kick off that task following manipulation on each child, while the batch is not executed, the reads + the maintenance task would do would operate on old data since the batch is not yet commited. + (You also risk kicking off too many of the same tasks, while only one is needed at the end of the batch.) + + The callbacks attached to a given batch instance are executed only if the batch executes. If the batch is used as a + context manager and an exception bubbles up, the queued up callbacks will not be run. 
+ + The callback should be a callable object that accepts at least one (positional) argument - instance of the batch object. + + .. code-block:: python + + def example_callback_handler(batch, *args, **kwargs): + pass + + Multiple callbacks can be attached to same BatchQuery instance both, at the point of instantiation and later + directly to the batch object instance. + + The callbacks collection is implemented as a ``set``, which naturally deduplicates the callback handlers: + + .. code-block:: python + + # let's add a few of the same callbacks at init + batch = BatchQuery(callbacks=([ + example_callback_handler, + example_callback_handler + ]) + + # and one more time same callback now using the collection API on the batch instance + batch.callbacks.add(example_callback_handler) + + assert len(batch.callbacks) == 1 + assert batch.callbacks == {example_callback_handler} + + # this, of course, also means that if you remove a callback, + # you negate "all additions" of that callback + batch.callbacks.remove(example_callback_handler) + + assert len(batch.callbacks) == 0 + + In order to benefit from the natural deduplication of the callback handlers it is allowed to pass in tuples of + callback and its positional arguments. + This allows one to pre-package additional call arguments for the callable without the need to wrap the callable and + args into a closure or ``functools.partial``. + + .. 
code-block:: python + + batch = BatchQuery(callbacks=([ + (example_callback_handler, 'some', 'args'), + (example_callback_handler, 'some', 'args'), + (example_callback_handler, 'some other', 'args'), + ]) + + batch.callbacks.add((example_callback_handler, 'some other', 'args')) + + assert len(batch.callbacks) == 2 + assert batch.callbacks == { + (example_callback_handler, 'some', 'args'), + (example_callback_handler, 'some other', 'args') + } + + Note that the tuple callback object format prevents the possibily of essentially same callback firing multiple times, + which might happen if you package same callback and arguments several times with ``functools.partial``, which returns a new callable with each call. + + Arguments packaged into callbacks communicated as tuples are passed to callback handler **after** the ``batch`` argument. + + .. code-block:: python + + call_args = [] + + def callback_handler(batch, *args, **kwargs): + call_args.append([batch] + list(args)) + + batch = BatchQuery(callbacks=([(callback_handler, 'some', 'args')]) + + batch.execute() + + assert call_args == [[batch, 'some', 'args']] + + + Failure in any of the callbacks does not affect the batch's execution, as the callbacks are started after the execution + of the batch is complete and no effort to "roll it back" is made. 
+ QuerySet method reference From 86ffd6a9741872c3094da22bac63b082e74ffe82 Mon Sep 17 00:00:00 2001 From: Daniel Dotsenko Date: Mon, 14 Apr 2014 11:56:30 -0700 Subject: [PATCH 0653/3726] removing `batch` from the list of args passed to BatchQuery callbacks --- cqlengine/query.py | 3 +-- cqlengine/tests/test_batch_query.py | 2 +- docs/topics/queryset.rst | 19 ++++++++++--------- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 8586ca572e..f2b6da14c1 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -104,7 +104,6 @@ def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on :param callbacks: A list of functions to be executed after the batch executes. Note, that if the batch does not execute, the callbacks are not executed. This, thus, effectively is a list of "on success" callback handlers. If defined, must be a collection of callables. - At this time only one argument is passed into the callback - the batch instance. 
:type callbacks: list or set or tuple """ self.queries = [] @@ -131,7 +130,7 @@ def _execute_callbacks(self): else: stored_args = callback[1:] callback = callback[0] - callback(self, *stored_args) + callback(*stored_args) # trying to clear up the ref counts for objects mentioned in the set del self._callbacks diff --git a/cqlengine/tests/test_batch_query.py b/cqlengine/tests/test_batch_query.py index da7c944f97..f956323a69 100644 --- a/cqlengine/tests/test_batch_query.py +++ b/cqlengine/tests/test_batch_query.py @@ -180,7 +180,7 @@ def my_callback(*args, **kwargs): b.execute() assert len(call_history) == 2 - assert {(b,), (b, 'more', 'args')} == set(call_history) + assert {tuple(), ('more', 'args')} == set(call_history) def test_callbacks_tied_to_execute(self): """Batch callbacks should NOT fire if batch is not executed in context manager mode""" diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index 586344469e..c05665527e 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -342,14 +342,16 @@ Batch Query Execution Callbacks The callbacks attached to a given batch instance are executed only if the batch executes. If the batch is used as a context manager and an exception bubbles up, the queued up callbacks will not be run. - The callback should be a callable object that accepts at least one (positional) argument - instance of the batch object. + The callback arguments signature is not prescribed. However, while it's possible to trap arguments you want to be + passed into the callback in a "callback" tuple you add to the batch, only passing of positional arguments is + supported at this time. This means there is no elegant way to communicate named arguments to the callback. .. 
code-block:: python - def example_callback_handler(batch, *args, **kwargs): + def example_callback_handler(*args): pass - Multiple callbacks can be attached to same BatchQuery instance both, at the point of instantiation and later + Multiple callbacks can be attached to same BatchQuery instance both, at the point of batch instantiation and later, directly to the batch object instance. The callbacks collection is implemented as a ``set``, which naturally deduplicates the callback handlers: @@ -396,22 +398,21 @@ Batch Query Execution Callbacks } Note that the tuple callback object format prevents the possibily of essentially same callback firing multiple times, - which might happen if you package same callback and arguments several times with ``functools.partial``, which returns a new callable with each call. - - Arguments packaged into callbacks communicated as tuples are passed to callback handler **after** the ``batch`` argument. + which might happen if you package same callback and arguments several times with ``functools.partial``, which returns + a new callable with each call. .. 
code-block:: python call_args = [] - def callback_handler(batch, *args, **kwargs): - call_args.append([batch] + list(args)) + def callback_handler(*args, **kwargs): + call_args.append(list(args)) batch = BatchQuery(callbacks=([(callback_handler, 'some', 'args')]) batch.execute() - assert call_args == [[batch, 'some', 'args']] + assert call_args[0] == ['some', 'args'] Failure in any of the callbacks does not affect the batch's execution, as the callbacks are started after the execution From 8093ac4bad555bf2f2f06c46b120a0cd7752a945 Mon Sep 17 00:00:00 2001 From: Daniel Dotsenko Date: Mon, 14 Apr 2014 18:10:48 -0700 Subject: [PATCH 0654/3726] switching to list as BatchQuery callbacks collection + add_callback() API --- cqlengine/query.py | 39 +++++++------- cqlengine/tests/test_batch_query.py | 78 ++++++++++----------------- docs/topics/queryset.rst | 82 +++++++---------------------- 3 files changed, 68 insertions(+), 131 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index f2b6da14c1..9e81f5a553 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -78,16 +78,7 @@ class BatchQuery(object): """ _consistency = None - @property - def callbacks(self): - """ - Returns the set object that contains all of the callbacks presently attached to this batch. 
- - :rtype: set - """ - return self._callbacks - - def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on_exception=False, callbacks=None): + def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on_exception=False): """ :param batch_type: (optional) One of batch type values available through BatchType enum :type batch_type: str or None @@ -113,7 +104,7 @@ def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on self.timestamp = timestamp self._consistency = consistency self._execute_on_exception = execute_on_exception - self._callbacks = set(callbacks or []) + self._callbacks = [] def add_query(self, query): if not isinstance(query, BaseCQLStatement): @@ -124,17 +115,29 @@ def consistency(self, consistency): self._consistency = consistency def _execute_callbacks(self): - for callback in self._callbacks: - if callable(callback): - stored_args = [] - else: - stored_args = callback[1:] - callback = callback[0] - callback(*stored_args) + for callback, args, kwargs in self._callbacks: + callback(*args, **kwargs) # trying to clear up the ref counts for objects mentioned in the set del self._callbacks + def add_callback(self, fn, *args, **kwargs): + """Add a function and arguments to be passed to it to be executed after the batch executes. + + A batch can support multiple callbacks. + + Note, that if the batch does not execute, the callbacks are not executed. + A callback, thus, is an "on batch success" handler. 
+ + :param fn: Callable object + :type fn: callable + :param *args: Positional arguments to be passed to the callback at the time of execution + :param **kwargs: Named arguments to be passed to the callback at the time of execution + """ + if not callable(fn): + raise ValueError("Value for argument 'fn' is {} and is not a callable object.".format(type(fn))) + self._callbacks.append((fn, args, kwargs)) + def execute(self): if len(self.queries) == 0: # Empty batch is a no-op diff --git a/cqlengine/tests/test_batch_query.py b/cqlengine/tests/test_batch_query.py index f956323a69..7066643d39 100644 --- a/cqlengine/tests/test_batch_query.py +++ b/cqlengine/tests/test_batch_query.py @@ -122,48 +122,21 @@ def test_API_managing_callbacks(self): # Callbacks can be added at init and after - def my_callback(batch, *args, **kwargs): + def my_callback(*args, **kwargs): pass # adding on init: - b = BatchQuery(callbacks=[ - my_callback, - my_callback, # will be filtered out automatically - (my_callback, 'more', 'args'), - (my_callback, 'more', 'args'), # will be filtered out too - ]) - assert b.callbacks == { - my_callback, - (my_callback, 'more', 'args') - } - - # adding using set API post-init: - - b.callbacks.add(my_callback) - b.callbacks.add((my_callback, 'more', 'args')) - b.callbacks.add((my_callback, 'yet more', 'args')) - assert b.callbacks == { - my_callback, - (my_callback, 'more', 'args'), - (my_callback, 'yet more', 'args') - } - - b.callbacks.remove(my_callback) - assert b.callbacks == { - (my_callback, 'more', 'args'), - (my_callback, 'yet more', 'args') - } - - b.callbacks.remove((my_callback, 'more', 'args')) - assert b.callbacks == { - (my_callback, 'yet more', 'args') - } - - - # insure that we disallow setting the callback colleciton on instance: - # (thus forcing use of init and set-like api tested above) - with self.assertRaises(AttributeError): - b.callbacks = set() + batch = BatchQuery() + + batch.add_callback(my_callback) + batch.add_callback(my_callback, 
2, named_arg='value') + batch.add_callback(my_callback, 1, 3) + + assert batch._callbacks == [ + (my_callback, (), {}), + (my_callback, (2,), {'named_arg':'value'}), + (my_callback, (1, 3), {}) + ] def test_callbacks_properly_execute_callables_and_tuples(self): @@ -172,15 +145,15 @@ def my_callback(*args, **kwargs): call_history.append(args) # adding on init: - b = BatchQuery(callbacks=[ - my_callback, - (my_callback, 'more', 'args') - ]) + batch = BatchQuery() - b.execute() + batch.add_callback(my_callback) + batch.add_callback(my_callback, 'more', 'args') + + batch.execute() assert len(call_history) == 2 - assert {tuple(), ('more', 'args')} == set(call_history) + assert [(), ('more', 'args')] == call_history def test_callbacks_tied_to_execute(self): """Batch callbacks should NOT fire if batch is not executed in context manager mode""" @@ -189,7 +162,8 @@ def test_callbacks_tied_to_execute(self): def my_callback(*args, **kwargs): call_history.append(args) - with BatchQuery(callbacks=[my_callback]) as b: + with BatchQuery() as batch: + batch.add_callback(my_callback) pass assert len(call_history) == 1 @@ -198,18 +172,20 @@ class SomeError(Exception): pass with self.assertRaises(SomeError): - with BatchQuery(callbacks=[my_callback]) as b: + with BatchQuery() as batch: + batch.add_callback(my_callback) # this error bubbling up through context manager # should prevent callback runs (along with b.execute()) raise SomeError - # still same call history + # still same call history. 
Nothing added assert len(call_history) == 1 - # but if execute run, even with an error bubbling through - # the callbacks also whoudl have fired + # but if execute ran, even with an error bubbling through + # the callbacks also would have fired with self.assertRaises(SomeError): - with BatchQuery(execute_on_exception=True, callbacks=[my_callback]) as b: + with BatchQuery(execute_on_exception=True) as batch: + batch.add_callback(my_callback) # this error bubbling up through context manager # should prevent callback runs (along with b.execute()) raise SomeError diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index c05665527e..d751152f2a 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -339,81 +339,39 @@ Batch Query Execution Callbacks the maintenance task would do would operate on old data since the batch is not yet commited. (You also risk kicking off too many of the same tasks, while only one is needed at the end of the batch.) + Multiple callbacks can be attached to same BatchQuery instance + The callbacks attached to a given batch instance are executed only if the batch executes. If the batch is used as a context manager and an exception bubbles up, the queued up callbacks will not be run. - The callback arguments signature is not prescribed. However, while it's possible to trap arguments you want to be - passed into the callback in a "callback" tuple you add to the batch, only passing of positional arguments is - supported at this time. This means there is no elegant way to communicate named arguments to the callback. - - .. code-block:: python - - def example_callback_handler(*args): - pass - - Multiple callbacks can be attached to same BatchQuery instance both, at the point of batch instantiation and later, - directly to the batch object instance. + The callback arguments signature is not prescribed. Moreover, the callback API allows trapping the arguments you want to be + passed into the callback. 
If you need to inspect the batch itself within the callback, simply trap the batch instance + as part of the arguments stored with the callback. - The callbacks collection is implemented as a ``set``, which naturally deduplicates the callback handlers: + Internally the callbacks collection is implemented as an ordered collection, which means the execution order follows + the order in which the callbacks are added to the batch. .. code-block:: python - # let's add a few of the same callbacks at init - batch = BatchQuery(callbacks=([ - example_callback_handler, - example_callback_handler - ]) - - # and one more time same callback now using the collection API on the batch instance - batch.callbacks.add(example_callback_handler) - - assert len(batch.callbacks) == 1 - assert batch.callbacks == {example_callback_handler} - - # this, of course, also means that if you remove a callback, - # you negate "all additions" of that callback - batch.callbacks.remove(example_callback_handler) - - assert len(batch.callbacks) == 0 - - In order to benefit from the natural deduplication of the callback handlers it is allowed to pass in tuples of - callback and its positional arguments. - This allows one to pre-package additional call arguments for the callable without the need to wrap the callable and - args into a closure or ``functools.partial``. - - .. 
code-block:: python - - batch = BatchQuery(callbacks=([ - (example_callback_handler, 'some', 'args'), - (example_callback_handler, 'some', 'args'), - (example_callback_handler, 'some other', 'args'), - ]) - - batch.callbacks.add((example_callback_handler, 'some other', 'args')) - - assert len(batch.callbacks) == 2 - assert batch.callbacks == { - (example_callback_handler, 'some', 'args'), - (example_callback_handler, 'some other', 'args') - } - - Note that the tuple callback object format prevents the possibily of essentially same callback firing multiple times, - which might happen if you package same callback and arguments several times with ``functools.partial``, which returns - a new callable with each call. - - .. code-block:: python + def my_callback(*args, **kwargs): + pass - call_args = [] + batch = BatchQuery() - def callback_handler(*args, **kwargs): - call_args.append(list(args)) + batch.add_callback(my_callback) + batch.add_callback(my_callback, 'positional arg', named_arg='named arg value') - batch = BatchQuery(callbacks=([(callback_handler, 'some', 'args')]) + # if you need reference to the batch within the callback, + # just trap it in the arguments to be passed to the callback: + batch.add_callback(my_callback, cqlengine_batch=batch) + # once the batch executes... batch.execute() - assert call_args[0] == ['some', 'args'] - + # the effect of the above scheduled callbacks will be similar to + my_callback() + my_callback('positional arg', named_arg='named arg value') + my_callback(cqlengine_batch=batch) Failure in any of the callbacks does not affect the batch's execution, as the callbacks are started after the execution of the batch is complete and no effort to "roll it back" is made. 
From 2d22138c0dfa1f115f32d8d308a1dad3a79b0505 Mon Sep 17 00:00:00 2001 From: Daniel Dotsenko Date: Wed, 9 Apr 2014 10:49:04 -0700 Subject: [PATCH 0655/3726] fix for "ValueError: 'column_name' is not in list" error on sync_table for models with primary keys only https://github.com/cqlengine/cqlengine/issues/175 --- cqlengine/management.py | 25 ++++++++++++++++--- cqlengine/tests/management/test_management.py | 21 +++++++++++++++- 2 files changed, 41 insertions(+), 5 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 7b40c30f91..ed31eb5759 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -65,6 +65,16 @@ def create_table(model, create_missing_keyspace=True): sync_table(model, create_missing_keyspace) def sync_table(model, create_missing_keyspace=True): + """ + Inspects the model and creates / updates the corresponding table and columns. + + Note that the attributes removed from the model are not deleted on the database. + They become effectively ignored by (will not show up on) the model. + + :param create_missing_keyspace: (Defaults to True) Flags to us that we need to create missing keyspace + mentioned in the model automatically. 
+ :type create_missing_keyspace: bool + """ if model.__abstract__: raise CQLEngineException("cannot create table from abstract model") @@ -238,12 +248,19 @@ def get_fields(model): tmp = con.execute(query, {'ks_name': ks_name, 'col_family': col_family}, ONE) - column_indices = [tmp.columns.index('column_name'), tmp.columns.index('validator')] + # Tables containing only primary keys do not appear to create + # any entries in system.schema_columns, as only non-primary-key attributes + # appear to be inserted into the schema_columns table + if not tmp.results: + return [] + + column_name_positon = tmp.columns.index('column_name') + validator_positon = tmp.columns.index('validator') try: - type_index = tmp.columns.index('type') - return [Field(x[column_indices[0]], x[column_indices[1]]) for x in tmp.results if x[type_index] == 'regular'] + type_position = tmp.columns.index('type') + return [Field(x[column_name_positon], x[validator_positon]) for x in tmp.results if x[type_position] == 'regular'] except ValueError: - return [Field(x[column_indices[0]], x[column_indices[1]]) for x in tmp.results] + return [Field(x[column_name_positon], x[validator_positon]) for x in tmp.results] # convert to Field named tuples diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 25f4d268b6..5179dee3a1 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -2,7 +2,7 @@ from cqlengine import ONE from cqlengine.exceptions import CQLEngineException -from cqlengine.management import create_table, delete_table, get_fields +from cqlengine.management import create_table, delete_table, get_fields, sync_table from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.connection import ConnectionPool, Host from cqlengine import management @@ -77,6 +77,11 @@ class CapitalizedKeyModel(Model): secondKey = columns.Integer(primary_key=True) someData = columns.Text() +class 
PrimaryKeysOnlyModel(Model): + first_ey = columns.Integer(primary_key=True) + second_key = columns.Integer(primary_key=True) + + class CapitalizedKeyTest(BaseCassEngTestCase): def test_table_definition(self): @@ -142,3 +147,17 @@ def test_add_column(self): self.assertEqual(len(fields), 4) +class SyncTableTests(BaseCassEngTestCase): + + def setUp(self): + delete_table(PrimaryKeysOnlyModel) + + def test_sync_table_works_with_primary_keys_only_tables(self): + sync_table(PrimaryKeysOnlyModel) + + # primary-keys-only tables do not create entries in system.schema_columns + # table. Only non-primary keys are added to that table. + # Our code must deal with that eventuality properly (not crash) + # on subsequent runs of sync_table (which runs get_fields internally) + get_fields(PrimaryKeysOnlyModel) + sync_table(PrimaryKeysOnlyModel) From 97826f038d8906497dfb5e165d24f9657a9063d5 Mon Sep 17 00:00:00 2001 From: Daniel Dotsenko Date: Wed, 16 Apr 2014 23:05:44 -0700 Subject: [PATCH 0656/3726] more blah for primary-key-only table sync_table test --- cqlengine/tests/management/test_management.py | 25 +++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 5179dee3a1..0ba27cd5a1 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -8,8 +8,7 @@ from cqlengine import management from cqlengine.tests.query.test_queryset import TestModel from cqlengine.models import Model -from cqlengine import columns - +from cqlengine import columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy class ConnectionPoolFailoverTestCase(BaseCassEngTestCase): """Test cassandra connection pooling.""" @@ -78,6 +77,8 @@ class CapitalizedKeyModel(Model): someData = columns.Text() class PrimaryKeysOnlyModel(Model): + __compaction__ = LeveledCompactionStrategy + first_ey = columns.Integer(primary_key=True) second_key 
= columns.Integer(primary_key=True) @@ -153,11 +154,31 @@ def setUp(self): delete_table(PrimaryKeysOnlyModel) def test_sync_table_works_with_primary_keys_only_tables(self): + + # This is "create table": + sync_table(PrimaryKeysOnlyModel) + # let's make sure settings persisted correctly: + + assert PrimaryKeysOnlyModel.__compaction__ == LeveledCompactionStrategy + # blows up with DoesNotExist if table does not exist + table_settings = management.get_table_settings(PrimaryKeysOnlyModel) + # let make sure the flag we care about + assert LeveledCompactionStrategy in table_settings['compaction_strategy_class'] + + + # Now we are "updating" the table: + + # setting up something to change + PrimaryKeysOnlyModel.__compaction__ = SizeTieredCompactionStrategy + # primary-keys-only tables do not create entries in system.schema_columns # table. Only non-primary keys are added to that table. # Our code must deal with that eventuality properly (not crash) # on subsequent runs of sync_table (which runs get_fields internally) get_fields(PrimaryKeysOnlyModel) sync_table(PrimaryKeysOnlyModel) + + table_settings = management.get_table_settings(PrimaryKeysOnlyModel) + assert SizeTieredCompactionStrategy in table_settings['compaction_strategy_class'] From 70e58416d0753a081f35e35f6dfacd841afc222d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 17 Apr 2014 15:27:46 -0700 Subject: [PATCH 0657/3726] cleaning up batch callback docs --- docs/topics/queryset.rst | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index d751152f2a..a8eb2af576 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -333,23 +333,11 @@ Batch Query Execution Callbacks In order to allow secondary tasks to be chained to the end of batch, BatchQuery instances allow callbacks to be registered with the batch, to be executed immediately after the batch executes. 
- Let's say an iterative action over a group of records needs to be followed up by a "maintenance" task like "Update - count of children on parent model." - If your were to kick off that task following manipulation on each child, while the batch is not executed, the reads - the maintenance task would do would operate on old data since the batch is not yet commited. - (You also risk kicking off too many of the same tasks, while only one is needed at the end of the batch.) - - Multiple callbacks can be attached to same BatchQuery instance + Multiple callbacks can be attached to same BatchQuery instance, they are executed in the same order that they + are added to the batch. The callbacks attached to a given batch instance are executed only if the batch executes. If the batch is used as a - context manager and an exception bubbles up, the queued up callbacks will not be run. - - The callback arguments signature is not prescribed. Moreover, the callback API allows trapping the arguments you want to be - passed into the callback. If you need to inspect the batch itself within the callback, simply trap the batch instance - as part of the arguments stored with the callback. - - Internally the callbacks collection is implemented as an ordered collection, which means the execution order follows - the order in which the callbacks are added to the batch. + context manager and an exception is raised, the queued up callbacks will not be run. .. code-block:: python @@ -374,7 +362,7 @@ Batch Query Execution Callbacks my_callback(cqlengine_batch=batch) Failure in any of the callbacks does not affect the batch's execution, as the callbacks are started after the execution - of the batch is complete and no effort to "roll it back" is made. + of the batch is complete. 
From e9403e562c8651772c1be8960e875ebc4744cb9e Mon Sep 17 00:00:00 2001 From: Nati CT Date: Tue, 22 Apr 2014 15:16:12 +0300 Subject: [PATCH 0658/3726] fixed issue with timestamp and DST --- cqlengine/query.py | 10 ++++++---- cqlengine/statements.py | 3 ++- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 9e81f5a553..cfc56eef46 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -1,4 +1,5 @@ import copy +import time from datetime import datetime, timedelta from cqlengine import BaseContainerColumn, Map, columns from cqlengine.columns import Counter, List, Set @@ -150,10 +151,11 @@ def execute(self): if isinstance(self.timestamp, (int, long)): ts = self.timestamp - elif isinstance(self.timestamp, timedelta): - ts = long((datetime.now() + self.timestamp - datetime.fromtimestamp(0)).total_seconds() * 1000000) - elif isinstance(self.timestamp, datetime): - ts = long((self.timestamp - datetime.fromtimestamp(0)).total_seconds() * 1000000) + elif isinstance(self.timestamp, (datetime, timedelta)): + ts = self.timestamp + if isinstance(self.timestamp, timedelta): + ts += datetime.now() # Apply timedelta + long(time.mktime(ts.timetuple()) * 1e+6 + ts.microsecond) else: raise ValueError("Batch expects a long, a timedelta, or a datetime") diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 2fb9a88a5d..70321b5818 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -1,3 +1,4 @@ +import time from datetime import datetime, timedelta from cqlengine.functions import QueryValue from cqlengine.operators import BaseWhereOperator, InOperator @@ -496,7 +497,7 @@ def timestamp_normalized(self): else: tmp = self.timestamp - return long(((tmp - datetime.fromtimestamp(0)).total_seconds()) * 1000000) + return long(time.mktime(tmp.timetuple()) * 1e+6 + tmp.microsecond) def __unicode__(self): raise NotImplementedError From dec7c24bb4ba5d98fa696dde91d2737e6607825b Mon Sep 17 00:00:00 2001 From: 
Jon Haddad Date: Tue, 22 Apr 2014 23:12:35 -0700 Subject: [PATCH 0659/3726] timestamp as a long wasn't being assigned --- cqlengine/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index cfc56eef46..383b9c0510 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -155,7 +155,7 @@ def execute(self): ts = self.timestamp if isinstance(self.timestamp, timedelta): ts += datetime.now() # Apply timedelta - long(time.mktime(ts.timetuple()) * 1e+6 + ts.microsecond) + ts = long(time.mktime(ts.timetuple()) * 1e+6 + ts.microsecond) else: raise ValueError("Batch expects a long, a timedelta, or a datetime") From e12272515ff1d16eb1ac115821a216a7d261da30 Mon Sep 17 00:00:00 2001 From: Nati CT Date: Sun, 13 Apr 2014 18:46:53 +0300 Subject: [PATCH 0660/3726] Removed cql monkey patch in favour of Quoter to allow Bytes support --- cqlengine/columns.py | 7 ++++++- cqlengine/statements.py | 19 ------------------- 2 files changed, 6 insertions(+), 20 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 7888981a97..e78e9255eb 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -207,10 +207,15 @@ def _val_is_null(self, val): class Bytes(Column): db_type = 'blob' + class Quoter(ValueQuoter): + def __str__(self): + return '0x' + self.value.encode('hex') + def to_database(self, value): val = super(Bytes, self).to_database(value) if val is None: return - return '0x' + val.encode('hex') + + return self.Quoter(val) def to_python(self, value): #return value[2:].decode('hex') diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 70321b5818..c7b06945d2 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -7,25 +7,6 @@ class StatementException(Exception): pass -# Monkey patch cql_quote to allow raw hex values -import cql.query - - -def cql_quote_replacement(term): - if isinstance(term, basestring) and term.startswith('0x'): - if isinstance(term, unicode): - 
return term.encode('utf8') - else: - return term - elif isinstance(term, unicode): - return "'%s'" % cql.query.__escape_quotes(term.encode('utf8')) - elif isinstance(term, (str, bool)): - return "'%s'" % cql.query.__escape_quotes(str(term)) - else: - return str(term) -cql.query.cql_quote = cql_quote_replacement - - class ValueQuoter(object): def __init__(self, value): From c5381ee834e291c827867f832722f63f7df512e9 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Wed, 23 Apr 2014 11:04:56 +0200 Subject: [PATCH 0661/3726] Tests that adding query with a limit affects the count as expected. --- cqlengine/tests/query/test_queryset.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 2144dee5ac..97c5c4e8b0 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -234,6 +234,13 @@ def test_query_expression_count(self): q = TestModel.objects(TestModel.test_id == 0) assert q.count() == 4 + def test_query_limit_count(self): + """ Tests that adding query with a limit affects the count as expected """ + assert TestModel.objects.count() == 12 + + q = TestModel.objects(TestModel.test_id == 0).limit(2) + assert q.count() == 2 + def test_iteration(self): """ Tests that iterating over a query set pulls back all of the expected results """ q = TestModel.objects(test_id=0) From ece104cc99aa77a1d739ff9e76082f21bcd9750a Mon Sep 17 00:00:00 2001 From: Nati CT Date: Wed, 23 Apr 2014 12:24:05 +0300 Subject: [PATCH 0662/3726] Added a test to show the problem with Text values and '0x' prefix --- cqlengine/tests/columns/test_value_io.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cqlengine/tests/columns/test_value_io.py b/cqlengine/tests/columns/test_value_io.py index c4958f40dd..4fa36d6cc1 100644 --- a/cqlengine/tests/columns/test_value_io.py +++ b/cqlengine/tests/columns/test_value_io.py @@ -89,6 +89,13 @@ class TestTextIO(BaseColumnIOTest): 
pkey_val = 'bacon' data_val = 'monkey' + +class TestNonBinaryTextIO(BaseColumnIOTest): + + column = columns.Text + pkey_val = 'bacon' + data_val = '0xmonkey' + class TestInteger(BaseColumnIOTest): column = columns.Integer From 7d76f0138eac2bd13ea1bcd827d2111213088f84 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Wed, 23 Apr 2014 12:45:39 -0700 Subject: [PATCH 0663/3726] updating changelog and version --- changelog | 7 +++++++ cqlengine/VERSION | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/changelog b/changelog index eb99e95382..697b99f5ff 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,12 @@ CHANGELOG +0.13.0 +* adding support for post batch callbacks (thanks Daniel Dotsenko github.com/dvdotsenko) +* fixing sync table for tables with multiple keys (thanks Daniel Dotsenko github.com/dvdotsenko) +* fixing bug in Bytes column (thanks Netanel Cohen-Tzemach github.com/natict) +* fixing bug with timestamps and DST (thanks Netanel Cohen-Tzemach github.com/natict) +* performance improvements (thanks Jon Haddad) + 0.12.0 * Normalize and unquote boolean values. (Thanks Kevin Deldycke github.com/kdeldycke) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index ac454c6a1f..54d1a4f2a4 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.12.0 +0.13.0 From 87ab5bd49959823a89cc6f2b4b8b15e707a8a19e Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Thu, 24 Apr 2014 16:40:54 +0200 Subject: [PATCH 0664/3726] Check number of elements in collections. --- changelog | 4 ++++ cqlengine/columns.py | 12 +++++++++- .../tests/columns/test_container_columns.py | 24 +++++++++++++++++++ 3 files changed, 39 insertions(+), 1 deletion(-) diff --git a/changelog b/changelog index 697b99f5ff..c2d7aa2055 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,9 @@ CHANGELOG +0.14.0-dev + +* Check number of elements in collections. 
+ 0.13.0 * adding support for post batch callbacks (thanks Daniel Dotsenko github.com/dvdotsenko) * fixing sync table for tables with multiple keys (thanks Daniel Dotsenko github.com/dvdotsenko) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index e78e9255eb..79cb373467 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -513,7 +513,9 @@ def to_database(self, value): class BaseContainerColumn(Column): """ - Base Container type + Base Container type for collection-like columns. + + https://cassandra.apache.org/doc/cql3/CQL.html#collections """ def __init__(self, value_type, **kwargs): @@ -537,6 +539,14 @@ def __init__(self, value_type, **kwargs): super(BaseContainerColumn, self).__init__(**kwargs) + def validate(self, value): + value = super(BaseContainerColumn, self).validate(value) + # It is dangerous to let collections have more than 65535. + # See: https://issues.apache.org/jira/browse/CASSANDRA-5428 + if value is not None and len(value) > 65535: + raise ValidationError("Collection can't have more than 65535 elements.") + return value + def get_column_def(self): """ Returns a column definition for CQL table definition diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 2f803913f6..63af8ab17d 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -92,6 +92,14 @@ def test_type_validation(self): with self.assertRaises(ValidationError): TestSetModel.create(int_set={'string', True}, text_set={1, 3.0}) + def test_element_count_validation(self): + """ + Tests that big collections are detected and raise an exception. 
+ """ + TestSetModel.create(text_set={str(uuid4()) for i in range(65535)}) + with self.assertRaises(ValidationError): + TestSetModel.create(text_set={str(uuid4()) for i in range(65536)}) + def test_partial_updates(self): """ Tests that partial udpates work as expected """ m1 = TestSetModel.create(int_set={1, 2, 3, 4}) @@ -232,6 +240,14 @@ def test_type_validation(self): with self.assertRaises(ValidationError): TestListModel.create(int_list=['string', True], text_list=[1, 3.0]) + def test_element_count_validation(self): + """ + Tests that big collections are detected and raise an exception. + """ + TestListModel.create(text_list=[str(uuid4()) for i in range(65535)]) + with self.assertRaises(ValidationError): + TestListModel.create(text_list=[str(uuid4()) for i in range(65536)]) + def test_partial_updates(self): """ Tests that partial udpates work as expected """ final = range(10) @@ -423,6 +439,14 @@ def test_type_validation(self): with self.assertRaises(ValidationError): TestMapModel.create(int_map={'key': 2, uuid4(): 'val'}, text_map={2: 5}) + def test_element_count_validation(self): + """ + Tests that big collections are detected and raise an exception. + """ + TestMapModel.create(text_map={str(uuid4()): i for i in range(65535)}) + with self.assertRaises(ValidationError): + TestMapModel.create(text_map={str(uuid4()): i for i in range(65536)}) + def test_partial_updates(self): """ Tests that partial udpates work as expected """ now = datetime.now() From b430157605ca661c44eeaafc55ccc0ace224bc86 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Thu, 24 Apr 2014 16:41:13 +0200 Subject: [PATCH 0665/3726] Fix test layout. 
--- .../tests/columns/test_container_columns.py | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 63af8ab17d..135ef05796 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -9,12 +9,14 @@ class TestSetModel(Model): + partition = columns.UUID(primary_key=True, default=uuid4) int_set = columns.Set(columns.Integer, required=False) text_set = columns.Set(columns.Text, required=False) class JsonTestColumn(columns.Column): + db_type = 'text' def to_python(self, value): @@ -65,7 +67,6 @@ def test_deleting_last_item_should_succeed(self): m = TestSetModel.get(partition=m.partition) self.assertNotIn(5, m.int_set) - def test_empty_set_retrieval(self): m = TestSetModel.create() m2 = TestSetModel.get(partition=m.partition) @@ -190,12 +191,14 @@ def test_default_empty_container_saving(self): class TestListModel(Model): + partition = columns.UUID(primary_key=True, default=uuid4) int_list = columns.List(columns.Integer, required=False) text_list = columns.List(columns.Text, required=False) class TestListColumn(BaseCassEngTestCase): + @classmethod def setUpClass(cls): super(TestListColumn, cls).setUpClass() @@ -358,16 +361,15 @@ def test_insert_none(self): TestListModel.create(partition=pkey, int_list=[None]) - - - class TestMapModel(Model): + partition = columns.UUID(primary_key=True, default=uuid4) int_map = columns.Map(columns.Integer, columns.UUID, required=False) text_map = columns.Map(columns.Text, columns.DateTime, required=False) class TestMapColumn(BaseCassEngTestCase): + @classmethod def setUpClass(cls): super(TestMapColumn, cls).setUpClass() @@ -391,13 +393,11 @@ def test_add_none_as_map_value(self): with self.assertRaises(ValidationError): TestMapModel.create(int_map={None:1}) - def test_empty_retrieve(self): tmp = TestMapModel.create() tmp2 = 
TestMapModel.get(partition=tmp.partition) tmp2.int_map['blah'] = 1 - def test_remove_last_entry_works(self): tmp = TestMapModel.create() tmp.text_map["blah"] = datetime.now() @@ -408,8 +408,6 @@ def test_remove_last_entry_works(self): tmp = TestMapModel.get(partition=tmp.partition) self.assertNotIn("blah", tmp.int_map) - - def test_io_success(self): """ Tests that a basic usage works as expected """ k1 = uuid4() @@ -474,7 +472,6 @@ def test_updates_from_none(self): m.int_map = expected m.save() - m2 = TestMapModel.get(partition=m.partition) assert m2.int_map == expected @@ -547,11 +544,15 @@ def test_default_empty_container_saving(self): # assert len([s for s in statements if '"TEST" = "TEST" +' in s]) == 1 # assert len([s for s in statements if '+ "TEST"' in s]) == 1 + class TestCamelMapModel(Model): + partition = columns.UUID(primary_key=True, default=uuid4) camelMap = columns.Map(columns.Text, columns.Integer, required=False) + class TestCamelMapColumn(BaseCassEngTestCase): + @classmethod def setUpClass(cls): super(TestCamelMapColumn, cls).setUpClass() @@ -565,4 +566,3 @@ def tearDownClass(cls): def test_camelcase_column(self): TestCamelMapModel.create(partition=None, camelMap={'blah': 1}) - From 2af6b9b55167e90e0bfa4483a82123db49b1366b Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Wed, 16 Apr 2014 16:50:37 +0200 Subject: [PATCH 0666/3726] Add support for simple table properties. Closes #181 and #191. --- cqlengine/__init__.py | 6 ++++- cqlengine/management.py | 18 ++++++++----- cqlengine/models.py | 14 ++++++++-- docs/topics/models.rst | 58 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 87 insertions(+), 9 deletions(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index cd9f8f3557..e1df6aff81 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -14,6 +14,11 @@ SizeTieredCompactionStrategy = "SizeTieredCompactionStrategy" LeveledCompactionStrategy = "LeveledCompactionStrategy" +# Caching constants. 
+ALL = "all" +KEYS_ONLY = "keys_only" +ROWS_ONLY = "rows_only" +NONE = "none" ANY = "ANY" ONE = "ONE" @@ -23,4 +28,3 @@ LOCAL_QUORUM = "LOCAL_QUORUM" EACH_QUORUM = "EACH_QUORUM" ALL = "ALL" - diff --git a/cqlengine/management.py b/cqlengine/management.py index ed31eb5759..6c9ff5939d 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -170,22 +170,28 @@ def add_column(col): qs += ['({})'.format(', '.join(qtypes))] - with_qs = ['read_repair_chance = {}'.format(model.__read_repair_chance__)] + with_qs = [] - _order = ['"{}" {}'.format(c.db_field_name, c.clustering_order or 'ASC') for c in model._clustering_keys.values()] + table_properties = ['bloom_filter_fp_chance', 'caching', 'comment', + 'dclocal_read_repair_chance', 'gc_grace_seconds', 'read_repair_chance', + 'replicate_on_write'] + for prop_name in table_properties: + prop_value = getattr(model, '__{}__'.format(prop_name), None) + if prop_value is not None: + with_qs.append('{} = {}'.format(prop_name, prop_value)) + _order = ['"{}" {}'.format(c.db_field_name, c.clustering_order or 'ASC') for c in model._clustering_keys.values()] if _order: with_qs.append('clustering order by ({})'.format(', '.join(_order))) compaction_options = get_compaction_options(model) - if compaction_options: compaction_options = json.dumps(compaction_options).replace('"', "'") with_qs.append("compaction = {}".format(compaction_options)) - # add read_repair_chance - qs += ['WITH {}'.format(' AND '.join(with_qs))] - + # Add table properties. 
+ if with_qs: + qs += ['WITH {}'.format(' AND '.join(with_qs))] qs = ' '.join(qs) return qs diff --git a/cqlengine/models.py b/cqlengine/models.py index 6f6662863e..6723c5c2cf 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -255,8 +255,18 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass #__ttl__ = None # this doesn't seem to be used __consistency__ = None # can be set per query - __read_repair_chance__ = 0.1 - + # Additional table properties + __bloom_filter_fp_chance__ = None + __caching__ = None + __comment__ = None + __dclocal_read_repair_chance__ = None + __default_time_to_live__ = None + __gc_grace_seconds__ = None + __index_interval__ = None + __memtable_flush_period_in_ms__ = None + __populate_io_cache_on_flush__ = None + __read_repair_chance__ = None + __replicate_on_write__ = None _timestamp = None # optional timestamp to include with the operation (USING TIMESTAMP) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index f6695d749b..14f905b40d 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -260,6 +260,63 @@ Extending Model Validation if validation fails +Table Properties +================ + + Each table can have its own set of configuration options. + These can be specified on a model with the following attributes: + + .. attribute:: Model.__bloom_filter_fp_chance + + .. attribute:: Model.__caching__ + + .. attribute:: Model.__comment__ + + .. attribute:: Model.__dclocal_read_repair_chance__ + + .. attribute:: Model.__default_time_to_live__ + + .. attribute:: Model.__gc_grace_seconds__ + + .. attribute:: Model.__index_interval__ + + .. attribute:: Model.__memtable_flush_period_in_ms__ + + .. attribute:: Model.__populate_io_cache_on_flush__ + + .. attribute:: Model.__read_repair_chance__ + + .. attribute:: Model.__replicate_on_write__ + + Example: + + .. 
code-block:: python + + from cqlengine import ROWS_ONLY, columns + from cqlengine.models import Model + + class User(Model): + __caching__ = ROWS_ONLY # cache only rows instead of keys only by default + __gc_grace_seconds__ = 86400 # 1 day instead of the default 10 days + + user_id = columns.UUID(primary_key=True) + name = columns.Text() + + Will produce the following CQL statement: + + .. code-block:: sql + + CREATE TABLE cqlengine.user ( + user_id uuid, + name text, + PRIMARY KEY (user_id) + ) WITH caching = 'rows_only' + AND gc_grace_seconds = 86400; + + See the `list of supported table properties for more information + `_. + + Compaction Options ================== @@ -311,6 +368,7 @@ Compaction Options Tables may use `LeveledCompactionStrategy` or `SizeTieredCompactionStrategy`. Both options are available in the top level cqlengine module. To reiterate, you will need to set your `__compaction__` option explicitly in order for cqlengine to handle any of your settings. + Manipulating model instances as dictionaries ============================================ From d407d1b2feee6c2671f12dc2972f00e2421bc996 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Thu, 24 Apr 2014 19:03:11 +0200 Subject: [PATCH 0667/3726] Update changelog. --- changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog b/changelog index c2d7aa2055..34c6d9cc43 100644 --- a/changelog +++ b/changelog @@ -3,6 +3,7 @@ CHANGELOG 0.14.0-dev * Check number of elements in collections. +* Add support for table properties. 
0.13.0 * adding support for post batch callbacks (thanks Daniel Dotsenko github.com/dvdotsenko) From 5338f483cf2eac8eb71c0ba3fbd1efabee7cb3f4 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 2 May 2014 15:13:31 -0700 Subject: [PATCH 0668/3726] merged optimizations --- cqlengine/columns.py | 6 +++--- cqlengine/connection.py | 6 +++--- cqlengine/models.py | 13 +++++++++---- cqlengine/tests/model/test_class_construction.py | 11 +++++++++++ 4 files changed, 26 insertions(+), 10 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 79cb373467..96ac7cc6c6 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -1,5 +1,5 @@ #column field types -from copy import deepcopy +from copy import deepcopy, copy from datetime import datetime from datetime import date import re @@ -33,7 +33,7 @@ def changed(self): return self.value != self.previous_value def reset_previous_value(self): - self.previous_value = deepcopy(self.value) + self.previous_value = copy(self.value) def getval(self): return self.value @@ -394,7 +394,7 @@ def validate(self, value): from uuid import UUID as _UUID if isinstance(val, _UUID): return val if isinstance(val, basestring) and self.re_uuid.match(val): - return _UUID(val) + return _UUID(val) raise ValidationError("{} is not a valid uuid".format(value)) def to_python(self, value): diff --git a/cqlengine/connection.py b/cqlengine/connection.py index e03a35b136..0b4fa7eca3 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -157,10 +157,10 @@ def put(self, conn): :type conn: connection """ - if self._queue.full(): + try: + self._queue.put(conn, block=False) + except queue.Full: conn.close() - else: - self._queue.put(conn) def _create_transport(self, host): """ diff --git a/cqlengine/models.py b/cqlengine/models.py index 6f6662863e..a0df093c75 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -179,10 +179,9 @@ def __get__(self, instance, owner): :param instance: the model instance :type instance: 
Model """ - - if instance: + try: return instance._values[self.column.column_name].getval() - else: + except AttributeError as e: return self.query_evaluator def __set__(self, instance, value): @@ -222,6 +221,8 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass # custom timestamps, see USING TIMESTAMP X timestamp = TimestampDescriptor() + # _len is lazily created by __len__ + # table names will be generated automatically from it's model # however, you can also define them manually here __table_name__ = None @@ -446,7 +447,11 @@ def __setitem__(self, key, val): def __len__(self): """ Returns the number of columns defined on that model. """ - return len(self._columns.keys()) + try: + return self._len + except: + self._len = len(self._columns.keys()) + return self._len def keys(self): """ Returns list of column's IDs. """ diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index 6ae46753ec..517798f2f4 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -330,6 +330,17 @@ class CDQModel(Model): CDQModel().save() +class TestCachedLengthIsNotCarriedToSubclasses(BaseCassEngTestCase): + def test_subclassing(self): + + length = len(ConcreteModelWithCol()) + + class AlreadyLoadedTest(ConcreteModelWithCol): + new_field = columns.Integer() + + self.assertGreater(len(AlreadyLoadedTest()), length) + + From 884745224e896da644f4aaebd5a55360d0e38638 Mon Sep 17 00:00:00 2001 From: tarzan Date: Mon, 5 May 2014 10:46:23 +0700 Subject: [PATCH 0669/3726] Log query (DEBUG level) before execute. 
--- cqlengine/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index e03a35b136..2f7d940ced 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -221,11 +221,11 @@ def execute(self, query, params, consistency_level=None): con = self.get() if not con: raise CQLEngineException("Error calling execute without calling setup.") + LOG.debug('{} {}'.format(query, repr(params))) cur = con.cursor() cur.execute(query, params, consistency_level=consistency_level) columns = [i[0] for i in cur.description or []] results = [RowResult(r) for r in cur.fetchall()] - LOG.debug('{} {}'.format(query, repr(params))) self.put(con) return QueryResult(columns, results) except CQLConnectionError as ex: From dce28bd05c02da0e403e83d78dcd47dd8a171593 Mon Sep 17 00:00:00 2001 From: Nati CT Date: Thu, 8 May 2014 11:03:01 +0300 Subject: [PATCH 0670/3726] Changed connection error messages to be more informative --- cqlengine/connection.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 081f167fac..1274e3e7fa 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -94,9 +94,9 @@ def setup( try: port = int(host[1]) except ValueError: - raise CQLConnectionError("Can't parse {}".format(''.join(host))) + raise CQLConnectionError("Can't parse port as int {}".format(':'.join(host))) else: - raise CQLConnectionError("Can't parse {}".format(''.join(host))) + raise CQLConnectionError("Can't parse host string {}".format(':'.join(host))) _hosts.append(Host(host[0], port)) From 7359a87be04ed78293c7fe2947ac2857efa214fc Mon Sep 17 00:00:00 2001 From: Michael Cyrulnik Date: Thu, 8 May 2014 09:22:34 -0400 Subject: [PATCH 0671/3726] test to show blind update of empty map causes AttributeError --- cqlengine/tests/columns/test_container_columns.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git 
a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 135ef05796..6637be1ece 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -480,6 +480,21 @@ def test_updates_from_none(self): m3 = TestMapModel.get(partition=m.partition) assert m3.int_map != expected + def test_blind_updates_from_none(self): + """ Tests that updates from None work as expected """ + m = TestMapModel.create(int_map=None) + expected = {1: uuid4()} + m.int_map = expected + m.save() + + m2 = TestMapModel.get(partition=m.partition) + assert m2.int_map == expected + + TestMapModel.objects(partition=m.partition).update(int_map={}) + + m3 = TestMapModel.get(partition=m.partition) + assert m3.int_map != expected + def test_updates_to_none(self): """ Tests that setting the field to None works as expected """ m = TestMapModel.create(int_map={1: uuid4()}) From a4143795d22fb7ab177f94aa67554f63eedb2fe6 Mon Sep 17 00:00:00 2001 From: Michael Cyrulnik Date: Thu, 8 May 2014 12:55:12 -0400 Subject: [PATCH 0672/3726] test to show blind update of empty set causes CQLEngineException --- cqlengine/tests/columns/test_container_columns.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 6637be1ece..d1531e7c53 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -67,6 +67,16 @@ def test_deleting_last_item_should_succeed(self): m = TestSetModel.get(partition=m.partition) self.assertNotIn(5, m.int_set) + def test_blind_deleting_last_item_should_succeed(self): + m = TestSetModel.create() + m.int_set.add(5) + m.save() + + TestSetModel.objects(partition=m.partition).update(int_set=set()) + + m = TestSetModel.get(partition=m.partition) + self.assertNotIn(5, m.int_set) + def test_empty_set_retrieval(self): m = 
TestSetModel.create() m2 = TestSetModel.get(partition=m.partition) From 161eda98a0ce6eb978649f096541eda0f02ff6aa Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Thu, 22 May 2014 15:33:45 +0200 Subject: [PATCH 0673/3726] Add missing table properties. --- cqlengine/management.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 6c9ff5939d..dc2440ddc6 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -173,8 +173,9 @@ def add_column(col): with_qs = [] table_properties = ['bloom_filter_fp_chance', 'caching', 'comment', - 'dclocal_read_repair_chance', 'gc_grace_seconds', 'read_repair_chance', - 'replicate_on_write'] + 'dclocal_read_repair_chance', 'default_time_to_live', 'gc_grace_seconds', + 'index_interval', 'memtable_flush_period_in_ms', 'populate_io_cache_on_flush', + 'read_repair_chance', 'replicate_on_write'] for prop_name in table_properties: prop_value = getattr(model, '__{}__'.format(prop_name), None) if prop_value is not None: From 7ed8fbdb5f0fc5a77cc7296db0f3d3718b00b1da Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Thu, 22 May 2014 15:34:18 +0200 Subject: [PATCH 0674/3726] Uppercase constants. That's how they're normalized by Cassandra. --- cqlengine/__init__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index e1df6aff81..e1a1859745 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -15,10 +15,10 @@ LeveledCompactionStrategy = "LeveledCompactionStrategy" # Caching constants. -ALL = "all" -KEYS_ONLY = "keys_only" -ROWS_ONLY = "rows_only" -NONE = "none" +ALL = "ALL" +KEYS_ONLY = "KEYS_ONLY" +ROWS_ONLY = "ROWS_ONLY" +NONE = "NONE" ANY = "ANY" ONE = "ONE" From 44f6ea7bf7c1f82044cf6a0da1f84dc38c553d00 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Thu, 22 May 2014 16:13:32 +0200 Subject: [PATCH 0675/3726] Unit test table properties setting. 
--- cqlengine/tests/management/test_management.py | 36 ++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 0ba27cd5a1..f64efa67f2 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -1,6 +1,9 @@ +import random +import string + from mock import MagicMock, patch -from cqlengine import ONE +from cqlengine import ONE, ALL, KEYS_ONLY, ROWS_ONLY, NONE from cqlengine.exceptions import CQLEngineException from cqlengine.management import create_table, delete_table, get_fields, sync_table from cqlengine.tests.base import BaseCassEngTestCase @@ -10,6 +13,7 @@ from cqlengine.models import Model from cqlengine import columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy + class ConnectionPoolFailoverTestCase(BaseCassEngTestCase): """Test cassandra connection pooling.""" @@ -148,6 +152,36 @@ def test_add_column(self): self.assertEqual(len(fields), 4) +class TablePropertiesTests(BaseCassEngTestCase): + + table_properties = { + 'bloom_filter_fp_chance': random.randint(0, 99999) / 100000.0, + 'caching': random.choice([ALL, KEYS_ONLY, ROWS_ONLY, NONE]), + 'comment': ''.join(random.choice(string.ascii_letters) + for i in range(random.randint(4, 99))), + 'dclocal_read_repair_chance': random.randint(0, 99999) / 100000.0, + 'default_time_to_live': random.randint(0, 99999), + 'gc_grace_seconds': random.randint(0, 99999), + 'index_interval': random.randint(0, 99999), + 'memtable_flush_period_in_ms': random.randint(0, 99999), + 'populate_io_cache_on_flush': random.choice([True, False]), + 'read_repair_chance': random.randint(0, 99999) / 100000.0, + 'replicate_on_write': random.choice([True, False]), + } + + def setUp(self): + delete_table(FirstModel) + + def test_set_table_properties(self): + for property_name, property_value in self.table_properties.items(): + property_id = 
'__{}__'.format(property_name) + setattr(FirstModel, property_id, property_value) + sync_table(FirstModel) + table_settings = management.get_table_settings(FirstModel) + for property_name, property_value in self.table_properties.items(): + assert table_settings[property_name] == property_value + + class SyncTableTests(BaseCassEngTestCase): def setUp(self): From ea9ca3abff15fb9244adbb3227dc0bfe6ba2acfa Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Thu, 22 May 2014 16:14:24 +0200 Subject: [PATCH 0676/3726] Table setting strings needs to be single quoted. --- cqlengine/management.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index dc2440ddc6..db445290ee 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -179,7 +179,10 @@ def add_column(col): for prop_name in table_properties: prop_value = getattr(model, '__{}__'.format(prop_name), None) if prop_value is not None: - with_qs.append('{} = {}'.format(prop_name, prop_value)) + # Strings needs to be single quoted + if isinstance(prop_value, basestring): + prop_value = "'{}'".format(prop_value) + with_qs.append("{} = {}".format(prop_name, prop_value)) _order = ['"{}" {}'.format(c.db_field_name, c.clustering_order or 'ASC') for c in model._clustering_keys.values()] if _order: From f5eab308167e1a7ae96e0a95eea51ee81011b7c4 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Thu, 22 May 2014 16:14:57 +0200 Subject: [PATCH 0677/3726] Handle mismatching schema table property name. 
--- cqlengine/tests/management/test_management.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index f64efa67f2..f0d7c6e909 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -179,6 +179,11 @@ def test_set_table_properties(self): sync_table(FirstModel) table_settings = management.get_table_settings(FirstModel) for property_name, property_value in self.table_properties.items(): + if property_name == 'dclocal_read_repair_chance': + # For some reason 'dclocal_read_repair_chance' in CQL is called + # just 'local_read_repair_chance' in the schema table. + # Source: https://issues.apache.org/jira/browse/CASSANDRA-6717 + property_name = 'local_read_repair_chance' assert table_settings[property_name] == property_value From d1f579b3ce71a9a62b1de8ad873be3c8c764cba5 Mon Sep 17 00:00:00 2001 From: Michael Cyrulnik Date: Fri, 30 May 2014 13:28:46 -0400 Subject: [PATCH 0678/3726] Can blind update a map column such that it becomes an empty map. 
--- cqlengine/columns.py | 3 +++ cqlengine/statements.py | 27 ++++++++++++++++++--------- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 96ac7cc6c6..83caac6b85 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -799,6 +799,9 @@ def get(self, key): def keys(self): return self.value.keys() + def items(self): + return self.value.items() + def __init__(self, key_type, value_type, default=dict, **kwargs): """ :param key_type: a column class indicating the types of the key diff --git a/cqlengine/statements.py b/cqlengine/statements.py index c7b06945d2..7ad022de8e 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -320,7 +320,10 @@ def _analyze(self): if self._operation == "update": self._updates = self.value.keys() else: - self._updates = sorted([k for k, v in self.value.items() if v != self.previous.get(k)]) or None + if self.value.items(): + self._updates = sorted([k for k, v in self.value.items() if v != self.previous.get(k)]) or None + else: + self._updates = [] self._analyzed = True def get_context_size(self): @@ -330,20 +333,26 @@ def get_context_size(self): def update_context(self, ctx): if not self._analyzed: self._analyze() ctx_id = self.context_id - for key in self._updates or []: - val = self.value.get(key) - ctx[str(ctx_id)] = self._column.key_col.to_database(key) if self._column else key - ctx[str(ctx_id + 1)] = self._column.value_col.to_database(val) if self._column else val - ctx_id += 2 + if self._updates is None or self._updates: + for key in self._updates or []: + val = self.value.get(key) + ctx[str(ctx_id)] = self._column.key_col.to_database(key) if self._column else key + ctx[str(ctx_id + 1)] = self._column.value_col.to_database(val) if self._column else val + ctx_id += 2 + else: + ctx[str(ctx_id)] = {} def __unicode__(self): if not self._analyzed: self._analyze() qs = [] ctx_id = self.context_id - for _ in self._updates or []: - qs += ['"{}"[:{}] = 
:{}'.format(self.field, ctx_id, ctx_id + 1)] - ctx_id += 2 + if self._updates is None or self._updates: + for _ in self._updates or []: + qs += ['"{}"[:{}] = :{}'.format(self.field, ctx_id, ctx_id + 1)] + ctx_id += 2 + else: + qs += ['"int_map" = :1'] return ', '.join(qs) From 011e491d5fa07579749878acb54bdfecd6c617af Mon Sep 17 00:00:00 2001 From: Michael Cyrulnik Date: Fri, 30 May 2014 14:14:28 -0400 Subject: [PATCH 0679/3726] Can blind update a set column such that it becomes an empty set. --- cqlengine/statements.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 7ad022de8e..c2ff767428 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -169,6 +169,8 @@ def __init__(self, field, value, operation=None, previous=None, column=None): def __unicode__(self): qs = [] ctx_id = self.context_id + if self.previous is None and not (self._assignments or self._additions or self._removals): + qs += ['"{}" = :{}'.format(self.field, ctx_id)] if self._assignments: qs += ['"{}" = :{}'.format(self.field, ctx_id)] ctx_id += 1 @@ -203,6 +205,8 @@ def get_context_size(self): def update_context(self, ctx): if not self._analyzed: self._analyze() ctx_id = self.context_id + if self.previous is None and not (self._assignments or self._additions or self._removals): + ctx[str(ctx_id)] = self._to_database({}) if self._assignments: ctx[str(ctx_id)] = self._to_database(self._assignments) ctx_id += 1 From 0f11c430a7f3c74c432db5c2ef99493e6caeff7c Mon Sep 17 00:00:00 2001 From: Michael Cyrulnik Date: Fri, 30 May 2014 14:19:25 -0400 Subject: [PATCH 0680/3726] test for blind update of empty list --- cqlengine/tests/columns/test_container_columns.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index d1531e7c53..838f2eb298 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ 
b/cqlengine/tests/columns/test_container_columns.py @@ -370,6 +370,20 @@ def test_insert_none(self): with self.assertRaises(ValidationError): TestListModel.create(partition=pkey, int_list=[None]) + def test_blind_list_updates_from_none(self): + """ Tests that updates from None work as expected """ + m = TestListModel.create(int_list=None) + expected = [1, 2] + m.int_list = expected + m.save() + + m2 = TestListModel.get(partition=m.partition) + assert m2.int_list == expected + + TestListModel.objects(partition=m.partition).update(int_list=[]) + + m3 = TestListModel.get(partition=m.partition) + assert m3.int_list == [] class TestMapModel(Model): From 635879df6315e5a7f31a1c022dc6c89c68b67dd1 Mon Sep 17 00:00:00 2001 From: Michael Cyrulnik Date: Fri, 30 May 2014 14:28:49 -0400 Subject: [PATCH 0681/3726] slight change to way blind update to empty handled in MapUpdateClause --- cqlengine/statements.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index c2ff767428..8530015389 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -318,45 +318,47 @@ class MapUpdateClause(ContainerUpdateClause): def __init__(self, field, value, operation=None, previous=None, column=None): super(MapUpdateClause, self).__init__(field, value, operation, previous, column=column) self._updates = None - self.previous = self.previous or {} + self.previous = self.previous def _analyze(self): if self._operation == "update": self._updates = self.value.keys() else: - if self.value.items(): - self._updates = sorted([k for k, v in self.value.items() if v != self.previous.get(k)]) or None + if self.previous is None: + self._updates = sorted([k for k, v in self.value.items()]) else: - self._updates = [] + self._updates = sorted([k for k, v in self.value.items() if v != self.previous.get(k)]) or None self._analyzed = True def get_context_size(self): if not self._analyzed: self._analyze() + if 
self.previous is None: + return 1 return len(self._updates or []) * 2 def update_context(self, ctx): if not self._analyzed: self._analyze() ctx_id = self.context_id - if self._updates is None or self._updates: + if self.previous is None and not self._updates: + ctx[str(ctx_id)] = {} + else: for key in self._updates or []: val = self.value.get(key) ctx[str(ctx_id)] = self._column.key_col.to_database(key) if self._column else key ctx[str(ctx_id + 1)] = self._column.value_col.to_database(val) if self._column else val ctx_id += 2 - else: - ctx[str(ctx_id)] = {} def __unicode__(self): if not self._analyzed: self._analyze() qs = [] ctx_id = self.context_id - if self._updates is None or self._updates: + if self.previous is None and not self._updates: + qs += ['"int_map" = :1'] + else: for _ in self._updates or []: qs += ['"{}"[:{}] = :{}'.format(self.field, ctx_id, ctx_id + 1)] ctx_id += 2 - else: - qs += ['"int_map" = :1'] return ', '.join(qs) From b00f84e284d9a36d4a827668b8c98fc13d240926 Mon Sep 17 00:00:00 2001 From: Michael Cyrulnik Date: Fri, 30 May 2014 14:30:24 -0400 Subject: [PATCH 0682/3726] fixing get_context_size for blind update to None in SetUpdateClause --- cqlengine/statements.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 8530015389..dbae45ba15 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -200,6 +200,8 @@ def _analyze(self): def get_context_size(self): if not self._analyzed: self._analyze() + if self.previous is None and not (self._assignments or self._additions or self._removals): + return 1 return int(bool(self._assignments)) + int(bool(self._additions)) + int(bool(self._removals)) def update_context(self, ctx): From 2695377da9aab231d8e9d2ee3d8767cf3145e0c8 Mon Sep 17 00:00:00 2001 From: Michael Cyrulnik Date: Fri, 30 May 2014 14:37:38 -0400 Subject: [PATCH 0683/3726] slight fix to MapUpdateClause context size. Fix MapUpdateClause assignment clause test. 
--- cqlengine/statements.py | 2 +- cqlengine/tests/statements/test_assignment_clauses.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index dbae45ba15..31d1ff088f 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -334,7 +334,7 @@ def _analyze(self): def get_context_size(self): if not self._analyzed: self._analyze() - if self.previous is None: + if self.previous is None and not self._updates: return 1 return len(self._updates or []) * 2 diff --git a/cqlengine/tests/statements/test_assignment_clauses.py b/cqlengine/tests/statements/test_assignment_clauses.py index 64b35f892b..126dd9869a 100644 --- a/cqlengine/tests/statements/test_assignment_clauses.py +++ b/cqlengine/tests/statements/test_assignment_clauses.py @@ -235,7 +235,7 @@ def test_shrinking_list_update(self): class MapUpdateTests(TestCase): def test_update(self): - c = MapUpdateClause('s', {3: 0, 5: 6}, {5: 0, 3: 4}) + c = MapUpdateClause('s', {3: 0, 5: 6}, previous={5: 0, 3: 4}) c._analyze() c.set_context_id(0) From d075a487c79f73e94414c143ee356e146f0214a8 Mon Sep 17 00:00:00 2001 From: Caleb Rackliffe Date: Tue, 3 Jun 2014 01:55:00 -0700 Subject: [PATCH 0684/3726] A ModelDefinitionException is now thrown from the metaclass at model definition time if the model contains a column whose name conflicts with a built-in attribute or method. 
(See https://github.com/cqlengine/cqlengine/issues/193) --- cqlengine/models.py | 7 +++++-- cqlengine/tests/model/test_model.py | 20 +++++++++++++++++++- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index a0df093c75..7791da0ec8 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -649,9 +649,12 @@ def _get_polymorphic_base(bases): has_partition_keys = any(v.partition_key for (k, v) in column_definitions) - #TODO: check that the defined columns don't conflict with any of the Model API's existing attributes/methods #transform column definitions for k, v in column_definitions: + # don't allow a column with the same name as a built-in attribute or method + if k in BaseModel.__dict__: + raise ModelDefinitionException("column '{}' conflicts with built-in attribute/method".format(k)) + # counter column primary keys are not allowed if (v.primary_key or v.partition_key) and isinstance(v, (columns.Counter, columns.BaseContainerColumn)): raise ModelDefinitionException('counter columns and container columns cannot be used as primary keys') @@ -661,7 +664,7 @@ def _get_polymorphic_base(bases): if not has_partition_keys and v.primary_key: v.partition_key = True has_partition_keys = True - _transform_column(k,v) + _transform_column(k, v) partition_keys = OrderedDict(k for k in primary_keys.items() if k[1].partition_key) clustering_keys = OrderedDict(k for k in primary_keys.items() if not k[1].partition_key) diff --git a/cqlengine/tests/model/test_model.py b/cqlengine/tests/model/test_model.py index a0a6929a27..1e18359ec0 100644 --- a/cqlengine/tests/model/test_model.py +++ b/cqlengine/tests/model/test_model.py @@ -1,6 +1,6 @@ from unittest import TestCase -from cqlengine.models import Model +from cqlengine.models import Model, ModelDefinitionException from cqlengine import columns @@ -31,3 +31,21 @@ class EqualityModel1(Model): self.assertEqual(m0, m0) self.assertNotEqual(m0, m1) + + +class 
BuiltInAttributeConflictTest(TestCase): + """tests Model definitions that conflict with built-in attributes/methods""" + + def test_model_with_attribute_name_conflict(self): + """should raise exception when model defines column that conflicts with built-in attribute""" + with self.assertRaises(ModelDefinitionException): + class IllegalTimestampColumnModel(Model): + my_primary_key = columns.Integer(primary_key=True) + timestamp = columns.BigInt() + + def test_model_with_method_name_conflict(self): + """should raise exception when model defines column that conflicts with built-in method""" + with self.assertRaises(ModelDefinitionException): + class IllegalFilterColumnModel(Model): + my_primary_key = columns.Integer(primary_key=True) + filter = columns.Text() \ No newline at end of file From d3ca9614fda54ce0539b6fce3eae82a3dbe7e266 Mon Sep 17 00:00:00 2001 From: Caleb Rackliffe Date: Tue, 3 Jun 2014 02:26:19 -0700 Subject: [PATCH 0685/3726] The pip plumbing for the sure library seems to be horribly broken in 1.2.7, so I propose pegging to the stable version and getting the Travis tests working again. 
See https://github.com/gabrielfalcao/sure/pull/60 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1fd68cdd9c..d813898e67 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,4 +3,4 @@ ipython==0.13.1 ipdb==0.7 Sphinx==1.1.3 mock==1.0.1 -sure +sure==1.2.5 From f8fd7942334775f138323b6478a1b07430b3e2b8 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 9 Jun 2014 14:34:15 -0700 Subject: [PATCH 0686/3726] updated changelog --- changelog | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/changelog b/changelog index c2d7aa2055..12e5e527a3 100644 --- a/changelog +++ b/changelog @@ -1,15 +1,18 @@ CHANGELOG -0.14.0-dev +0.14.0 + +* fix for setting map to empty (Lifto) +* report when creating models with attributes that conflict with cqlengine (maedhroz) +* use stable version of sure package (maedhroz) +* performance improvements -* Check number of elements in collections. 0.13.0 * adding support for post batch callbacks (thanks Daniel Dotsenko github.com/dvdotsenko) * fixing sync table for tables with multiple keys (thanks Daniel Dotsenko github.com/dvdotsenko) * fixing bug in Bytes column (thanks Netanel Cohen-Tzemach github.com/natict) * fixing bug with timestamps and DST (thanks Netanel Cohen-Tzemach github.com/natict) -* performance improvements (thanks Jon Haddad) 0.12.0 From 4eb4e74bf7b55d9c0d36cf91cc4bc5fcf93cafae Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 9 Jun 2014 15:14:53 -0700 Subject: [PATCH 0687/3726] added native driver to requirements --- requirements.txt | 1 + setup.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d813898e67..62eba662b1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,3 +4,4 @@ ipdb==0.7 Sphinx==1.1.3 mock==1.0.1 sure==1.2.5 +cassandra-driver>=2.0.0 diff --git a/setup.py b/setup.py index 2965a6d73a..dfbcdf05f1 100644 --- a/setup.py +++ b/setup.py 
@@ -31,7 +31,7 @@ "Topic :: Software Development :: Libraries :: Python Modules", ], keywords='cassandra,cql,orm', - install_requires = ['cql'], + install_requires = ['cql', 'cassandra-driver'], author='Blake Eggleston, Jon Haddad', author_email='bdeggleston@gmail.com', url='https://github.com/cqlengine/cqlengine', From 6180d5fda323e13c3956ce235b63c57f299755fe Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 9 Jun 2014 17:49:33 -0700 Subject: [PATCH 0688/3726] getting native driver setup --- cqlengine/connection.py | 12 +++++++++--- cqlengine/management.py | 1 - cqlengine/tests/base.py | 2 +- upgrading.txt | 4 ++++ 4 files changed, 14 insertions(+), 5 deletions(-) create mode 100644 upgrading.txt diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 1274e3e7fa..aa5941314b 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -3,6 +3,8 @@ #http://cassandra.apache.org/doc/cql/CQL.html from collections import namedtuple +from cassandra.cluster import Cluster + try: import Queue as queue except ImportError: @@ -33,7 +35,8 @@ class CQLConnectionError(CQLEngineException): pass # global connection pool connection_pool = None - +cluster = None +session = None class CQLConnectionError(CQLEngineException): pass @@ -76,8 +79,11 @@ def setup( :type timeout: int or long """ - global _max_connections - global connection_pool + global _max_connections, connection_pool, cluster, session + + cluster = Cluster(hosts) + session = cluster.connect() + _max_connections = max_connections if default_keyspace: diff --git a/cqlengine/management.py b/cqlengine/management.py index ed31eb5759..1e0401a43a 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -17,7 +17,6 @@ # system keyspaces schema_columnfamilies = NamedTable('system', 'schema_columnfamilies') - def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, durable_writes=True, **replication_values): """ creates a keyspace diff --git a/cqlengine/tests/base.py 
b/cqlengine/tests/base.py index 98e34eaa7e..7700456b7b 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -6,7 +6,7 @@ if os.environ.get('CASSANDRA_TEST_HOST'): CASSANDRA_TEST_HOST = os.environ['CASSANDRA_TEST_HOST'] else: - CASSANDRA_TEST_HOST = 'localhost:9160' + CASSANDRA_TEST_HOST = 'localhost' class BaseCassEngTestCase(TestCase): diff --git a/upgrading.txt b/upgrading.txt new file mode 100644 index 0000000000..5f4f89d2aa --- /dev/null +++ b/upgrading.txt @@ -0,0 +1,4 @@ +0.15 Upgrade to use Datastax Native Driver + + + From 5053fb241aed055933cb9d3b6bbde9fa0f715779 Mon Sep 17 00:00:00 2001 From: Michael Cyrulnik Date: Mon, 16 Jun 2014 08:21:06 -0400 Subject: [PATCH 0689/3726] removing redundant assignment in MapUpdateClause --- cqlengine/statements.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 31d1ff088f..090fc62680 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -320,7 +320,6 @@ class MapUpdateClause(ContainerUpdateClause): def __init__(self, field, value, operation=None, previous=None, column=None): super(MapUpdateClause, self).__init__(field, value, operation, previous, column=column) self._updates = None - self.previous = self.previous def _analyze(self): if self._operation == "update": From 78d1654478777ec5959b83774d9ff74d696e4b18 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 16 Jun 2014 14:49:14 -0700 Subject: [PATCH 0690/3726] removed cqlengine based exceptios in favor of native driver exceptions --- cqlengine/connection.py | 7 ++++++ cqlengine/tests/base.py | 8 +++---- .../tests/connections/test_connection_pool.py | 22 ------------------- .../connections/test_connection_setup.py | 22 ------------------- upgrading.txt | 3 ++- 5 files changed, 13 insertions(+), 49 deletions(-) delete mode 100644 cqlengine/tests/connections/test_connection_pool.py delete mode 100644 cqlengine/tests/connections/test_connection_setup.py diff --git 
a/cqlengine/connection.py b/cqlengine/connection.py index aa5941314b..dbf92ecd07 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -261,3 +261,10 @@ def connection_manager(): # tmp = connection_pool.get() yield connection_pool # connection_pool.put(tmp) + + +def execute_native(query, params, consistency_level): + # TODO use consistency level + prepared = session.prepare(query) + result = session.execute(prepared, params) + return result diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index 7700456b7b..35b7cca97a 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -8,13 +8,13 @@ else: CASSANDRA_TEST_HOST = 'localhost' +connection.setup([CASSANDRA_TEST_HOST], default_keyspace='cqlengine_test') class BaseCassEngTestCase(TestCase): - @classmethod - def setUpClass(cls): - super(BaseCassEngTestCase, cls).setUpClass() - connection.setup([CASSANDRA_TEST_HOST], default_keyspace='cqlengine_test') + # @classmethod + # def setUpClass(cls): + # super(BaseCassEngTestCase, cls).setUpClass() def assertHasAttr(self, obj, attr): self.assertTrue(hasattr(obj, attr), diff --git a/cqlengine/tests/connections/test_connection_pool.py b/cqlengine/tests/connections/test_connection_pool.py deleted file mode 100644 index b34ea47182..0000000000 --- a/cqlengine/tests/connections/test_connection_pool.py +++ /dev/null @@ -1,22 +0,0 @@ -from unittest import TestCase -from cql import OperationalError -from mock import MagicMock, patch, Mock - -from cqlengine import ONE -from cqlengine.connection import ConnectionPool, Host - - -class OperationalErrorLoggingTest(TestCase): - def test_logging(self): - p = ConnectionPool([Host('127.0.0.1', '9160')]) - - class MockConnection(object): - host = 'localhost' - port = 6379 - def cursor(self): - raise OperationalError('test') - - - with patch.object(p, 'get', return_value=MockConnection()): - with self.assertRaises(OperationalError): - p.execute("select * from system.peers", {}, ONE) diff --git 
a/cqlengine/tests/connections/test_connection_setup.py b/cqlengine/tests/connections/test_connection_setup.py deleted file mode 100644 index e07820f93c..0000000000 --- a/cqlengine/tests/connections/test_connection_setup.py +++ /dev/null @@ -1,22 +0,0 @@ -from unittest import TestCase -from mock import patch - -from cqlengine.connection import setup as setup_connection -from cqlengine.connection import CQLConnectionError, Host - - -class OperationalErrorLoggingTest(TestCase): - - @patch('cqlengine.connection.ConnectionPool', return_value=None, autospec=True) - def test_setup_hosts(self, PatchedConnectionPool): - with self.assertRaises(CQLConnectionError): - setup_connection(hosts=['localhost:abcd']) - self.assertEqual(len(PatchedConnectionPool.mock_calls), 0) - - with self.assertRaises(CQLConnectionError): - setup_connection(hosts=['localhost:9160:abcd']) - self.assertEqual(len(PatchedConnectionPool.mock_calls), 0) - - setup_connection(hosts=['localhost:9161', 'remotehost']) - self.assertEqual(len(PatchedConnectionPool.mock_calls), 1) - self.assertEqual(PatchedConnectionPool.call_args[0][0], [Host('localhost', 9161), Host('remotehost', 9160)]) diff --git a/upgrading.txt b/upgrading.txt index 5f4f89d2aa..cf82097bff 100644 --- a/upgrading.txt +++ b/upgrading.txt @@ -1,4 +1,5 @@ 0.15 Upgrade to use Datastax Native Driver - +Calls to execute should no longer use named placeholders. +We no longer raise cqlengine based OperationalError when connection fails. Now using the exception thrown in the native driver. 
From 0bb08e22ea7b95d6095426c7cc421f17cb1c352d Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 16 Jun 2014 15:23:49 -0700 Subject: [PATCH 0691/3726] starting to use native driver for syncing tables --- cqlengine/connection.py | 3 +++ cqlengine/management.py | 15 +++++++-------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index dbf92ecd07..a3ff3ba11a 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -246,6 +246,7 @@ def execute(self, query, params, consistency_level=None): def execute(query, params=None, consistency_level=None): + if isinstance(query, BaseCQLStatement): params = query.get_context() query = str(query) @@ -254,6 +255,8 @@ def execute(query, params=None, consistency_level=None): consistency_level = connection_pool._consistency return connection_pool.execute(query, params, consistency_level) + #return execute_native(query, params, consistency_level) + @contextmanager def connection_manager(): """ :rtype: ConnectionPool """ diff --git a/cqlengine/management.py b/cqlengine/management.py index 1e0401a43a..c926601d3b 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -4,7 +4,7 @@ from cqlengine import ONE from cqlengine.named import NamedTable -from cqlengine.connection import connection_manager, execute +from cqlengine.connection import connection_manager, execute, execute_native from cqlengine.exceptions import CQLEngineException import logging @@ -87,13 +87,12 @@ def sync_table(model, create_missing_keyspace=True): if create_missing_keyspace: create_keyspace(ks_name) - with connection_manager() as con: - tables = con.execute( - "SELECT columnfamily_name from system.schema_columnfamilies WHERE keyspace_name = :ks_name", - {'ks_name': ks_name}, - ONE - ) - tables = [x[0] for x in tables.results] + tables = execute_native( + "SELECT columnfamily_name from system.schema_columnfamilies WHERE keyspace_name = ?", + [ks_name], ONE + ) + + tables = 
[x.columnfamily_name for x in tables] #check for an existing column family if raw_cf_name not in tables: From eb19c3e3e722f4bd9fda970bd760a6a8350969e8 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 16 Jun 2014 16:25:34 -0700 Subject: [PATCH 0692/3726] creating table with native driver --- cqlengine/connection.py | 2 +- cqlengine/management.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index a3ff3ba11a..5db546c0a0 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -266,7 +266,7 @@ def connection_manager(): # connection_pool.put(tmp) -def execute_native(query, params, consistency_level): +def execute_native(query, params=None, consistency_level=None): # TODO use consistency level prepared = session.prepare(query) result = session.execute(prepared, params) diff --git a/cqlengine/management.py b/cqlengine/management.py index c926601d3b..c12b109f7f 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -99,7 +99,7 @@ def sync_table(model, create_missing_keyspace=True): qs = get_create_table(model) try: - execute(qs) + execute_native(qs) except CQLEngineException as ex: # 1.2 doesn't return cf names, so we have to examine the exception # and ignore if it says the column family already exists @@ -246,6 +246,8 @@ def get_fields(model): tmp = con.execute(query, {'ks_name': ks_name, 'col_family': col_family}, ONE) + #import ipdb; ipdb.set_trace() + # Tables containing only primary keys do not appear to create # any entries in system.schema_columns, as only non-primary-key attributes # appear to be inserted into the schema_columns table From 0c011a66d5866d994fa1987723d540af20be4b27 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 16 Jun 2014 16:29:13 -0700 Subject: [PATCH 0693/3726] performing table alterations using native driver --- cqlengine/management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/management.py 
b/cqlengine/management.py index c12b109f7f..3fdfa61eab 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -116,7 +116,7 @@ def sync_table(model, create_missing_keyspace=True): # add missing column using the column def query = "ALTER TABLE {} add {}".format(cf_name, col.get_column_def()) logger.debug(query) - execute(query) + execute_native(query) update_compaction(model) From bccca12f13e39f13939d6b53681c19f72340dca1 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 16 Jun 2014 16:45:43 -0700 Subject: [PATCH 0694/3726] reworking get_fields to use much simpler native driver calls --- cqlengine/management.py | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 3fdfa61eab..f81e591352 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -108,7 +108,7 @@ def sync_table(model, create_missing_keyspace=True): else: # see if we're missing any columns fields = get_fields(model) - field_names = [x.name for x in fields] + field_names = [x.name for x in fields] # [Field(name=u'name', type=u'org.apache.cassandra.db.marshal.UTF8Type')] for name, col in model._columns.items(): if col.primary_key or col.partition_key: continue # we can't mess with the PK if col.db_field_name in field_names: continue # skip columns already defined @@ -239,28 +239,19 @@ def get_fields(model): col_family = model.column_family_name(include_keyspace=False) with connection_manager() as con: - query = "SELECT * FROM system.schema_columns \ - WHERE keyspace_name = :ks_name AND columnfamily_name = :col_family" - - logger.debug("get_fields %s %s", ks_name, col_family) - - tmp = con.execute(query, {'ks_name': ks_name, 'col_family': col_family}, ONE) - - #import ipdb; ipdb.set_trace() + query = "select * from system.schema_columns where keyspace_name = ? and columnfamily_name = ?" 
+ tmp = execute_native(query, [ks_name, col_family]) # Tables containing only primary keys do not appear to create # any entries in system.schema_columns, as only non-primary-key attributes # appear to be inserted into the schema_columns table - if not tmp.results: + if not tmp: return [] - column_name_positon = tmp.columns.index('column_name') - validator_positon = tmp.columns.index('validator') try: - type_position = tmp.columns.index('type') - return [Field(x[column_name_positon], x[validator_positon]) for x in tmp.results if x[type_position] == 'regular'] + return [Field(x.column_name, x.validator) for x in tmp if x.type == 'regular'] except ValueError: - return [Field(x[column_name_positon], x[validator_positon]) for x in tmp.results] + return [Field(x.column_name, x.validator) for x in tmp] # convert to Field named tuples From bbbb3a1fbb116cef29a196b8360f57faafeb1020 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 16 Jun 2014 16:51:32 -0700 Subject: [PATCH 0695/3726] get_fields now using native driver --- cqlengine/management.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index f81e591352..329e1baa62 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -108,7 +108,7 @@ def sync_table(model, create_missing_keyspace=True): else: # see if we're missing any columns fields = get_fields(model) - field_names = [x.name for x in fields] # [Field(name=u'name', type=u'org.apache.cassandra.db.marshal.UTF8Type')] + field_names = [x.name for x in fields] for name, col in model._columns.items(): if col.primary_key or col.partition_key: continue # we can't mess with the PK if col.db_field_name in field_names: continue # skip columns already defined @@ -238,9 +238,8 @@ def get_fields(model): ks_name = model._get_keyspace() col_family = model.column_family_name(include_keyspace=False) - with connection_manager() as con: - query = "select * from system.schema_columns where 
keyspace_name = ? and columnfamily_name = ?" - tmp = execute_native(query, [ks_name, col_family]) + query = "select * from system.schema_columns where keyspace_name = ? and columnfamily_name = ?" + tmp = execute_native(query, [ks_name, col_family]) # Tables containing only primary keys do not appear to create # any entries in system.schema_columns, as only non-primary-key attributes From 434590cb850c10c6877aebb62da2724b1f08fe97 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 16 Jun 2014 17:15:48 -0700 Subject: [PATCH 0696/3726] setting compaction options via native --- cqlengine/management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 329e1baa62..c5bd694871 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -300,7 +300,7 @@ def update_compaction(model): cf_name = model.column_family_name() query = "ALTER TABLE {} with compaction = {}".format(cf_name, options) logger.debug(query) - execute(query) + execute_native(query) return True return False From 60d58c193044d8498696675af73f67d4382f38c1 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 16 Jun 2014 17:17:37 -0700 Subject: [PATCH 0697/3726] delete keyspace now uses native driver --- cqlengine/management.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index c5bd694871..dee113fd5f 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -54,10 +54,9 @@ def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, def delete_keyspace(name): - with connection_manager() as con: - _, keyspaces = con.execute("""SELECT keyspace_name FROM system.schema_keyspaces""", {}, ONE) - if name in [r[0] for r in keyspaces]: - execute("DROP KEYSPACE {}".format(name)) + keyspaces = execute_native("""SELECT keyspace_name FROM system.schema_keyspaces""", {}, ONE) + if name in [r.keyspace_name for r in keyspaces]: + 
execute_native("DROP KEYSPACE {}".format(name)) def create_table(model, create_missing_keyspace=True): warnings.warn("create_table has been deprecated in favor of sync_table and will be removed in a future release", DeprecationWarning) From f1f2ef0356e24d46db7bd82407e8a23495b4a085 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 16 Jun 2014 17:33:55 -0700 Subject: [PATCH 0698/3726] dropping tables natively --- cqlengine/management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index dee113fd5f..d2fac363f1 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -325,6 +325,6 @@ def drop_table(model): return cf_name = model.column_family_name() - execute('drop table {};'.format(cf_name)) + execute_native('drop table {};'.format(cf_name)) From 2dd3a388a0b659b481ac9d95a73261957bfebe3b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 16 Jun 2014 18:15:04 -0700 Subject: [PATCH 0699/3726] drop table now uses the native driver metadata instead of trying to do crazy raw calls --- cqlengine/connection.py | 6 ++++++ cqlengine/management.py | 20 +++++++++----------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 5db546c0a0..2768db1077 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -271,3 +271,9 @@ def execute_native(query, params=None, consistency_level=None): prepared = session.prepare(query) result = session.execute(prepared, params) return result + +def get_session(): + return session + +def get_cluster(): + return cluster diff --git a/cqlengine/management.py b/cqlengine/management.py index d2fac363f1..dc3f086200 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -4,7 +4,7 @@ from cqlengine import ONE from cqlengine.named import NamedTable -from cqlengine.connection import connection_manager, execute, execute_native +from cqlengine.connection import connection_manager, 
execute, execute_native, get_session, get_cluster from cqlengine.exceptions import CQLEngineException import logging @@ -313,18 +313,16 @@ def delete_table(model): def drop_table(model): # don't try to delete non existant tables + meta = get_cluster().metadata + ks_name = model._get_keyspace() - with connection_manager() as con: - _, tables = con.execute( - "SELECT columnfamily_name from system.schema_columnfamilies WHERE keyspace_name = :ks_name", - {'ks_name': ks_name}, - ONE - ) raw_cf_name = model.column_family_name(include_keyspace=False) - if raw_cf_name not in [t[0] for t in tables]: - return - cf_name = model.column_family_name() - execute_native('drop table {};'.format(cf_name)) + try: + table = meta.keyspaces[ks_name].tables[raw_cf_name] + execute_native('drop table {};'.format(model.column_family_name(include_keyspace=True))) + except KeyError: + pass + From 232e5e5558b86dd7cac5d9e5d3d3492bcf26a991 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 16 Jun 2014 18:19:08 -0700 Subject: [PATCH 0700/3726] switched to cluster metadata for creating keyspaces --- cqlengine/management.py | 48 ++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index dc3f086200..678ff89597 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -27,30 +27,30 @@ def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, :param durable_writes: 1.2 only, write log is bypassed if set to False :param **replication_values: 1.2 only, additional values to ad to the replication data map """ - with connection_manager() as con: - _, keyspaces = con.execute("""SELECT keyspace_name FROM system.schema_keyspaces""", {}, ONE) - if name not in [r[0] for r in keyspaces]: - #try the 1.2 method - replication_map = { - 'class': strategy_class, - 'replication_factor':replication_factor - } - replication_map.update(replication_values) - if strategy_class.lower() 
!= 'simplestrategy': - # Although the Cassandra documentation states for `replication_factor` - # that it is "Required if class is SimpleStrategy; otherwise, - # not used." we get an error if it is present. - replication_map.pop('replication_factor', None) - - query = """ - CREATE KEYSPACE {} - WITH REPLICATION = {} - """.format(name, json.dumps(replication_map).replace('"', "'")) - - if strategy_class != 'SimpleStrategy': - query += " AND DURABLE_WRITES = {}".format('true' if durable_writes else 'false') - - execute(query) + cluster = get_cluster() + + if name not in cluster.metadata.keyspaces: + #try the 1.2 method + replication_map = { + 'class': strategy_class, + 'replication_factor':replication_factor + } + replication_map.update(replication_values) + if strategy_class.lower() != 'simplestrategy': + # Although the Cassandra documentation states for `replication_factor` + # that it is "Required if class is SimpleStrategy; otherwise, + # not used." we get an error if it is present. + replication_map.pop('replication_factor', None) + + query = """ + CREATE KEYSPACE {} + WITH REPLICATION = {} + """.format(name, json.dumps(replication_map).replace('"', "'")) + + if strategy_class != 'SimpleStrategy': + query += " AND DURABLE_WRITES = {}".format('true' if durable_writes else 'false') + + execute_native(query) def delete_keyspace(name): From a5b4535a8c3bf0152c077fa8a62df7b462da6869 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 16 Jun 2014 18:38:58 -0700 Subject: [PATCH 0701/3726] removed a bunch of raw CQL calls in favor of the driver --- cqlengine/management.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 678ff89597..8ddf8de128 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -54,8 +54,8 @@ def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, def delete_keyspace(name): - keyspaces = execute_native("""SELECT keyspace_name 
FROM system.schema_keyspaces""", {}, ONE) - if name in [r.keyspace_name for r in keyspaces]: + cluster = get_cluster() + if name in cluster.metadata.keyspaces: execute_native("DROP KEYSPACE {}".format(name)) def create_table(model, create_missing_keyspace=True): @@ -86,12 +86,9 @@ def sync_table(model, create_missing_keyspace=True): if create_missing_keyspace: create_keyspace(ks_name) - tables = execute_native( - "SELECT columnfamily_name from system.schema_columnfamilies WHERE keyspace_name = ?", - [ks_name], ONE - ) + cluster = get_cluster() - tables = [x.columnfamily_name for x in tables] + tables = cluster.metadata.keyspaces[ks_name].tables #check for an existing column family if raw_cf_name not in tables: From e4a763ceb3b2bc4526ec7c4da6a6b5e368d6c936 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 16 Jun 2014 19:24:46 -0700 Subject: [PATCH 0702/3726] logic for indexes now using native driver and metadata --- cqlengine/management.py | 37 +++++++++++++------------------------ 1 file changed, 13 insertions(+), 24 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 8ddf8de128..820a70139b 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -88,7 +88,8 @@ def sync_table(model, create_missing_keyspace=True): cluster = get_cluster() - tables = cluster.metadata.keyspaces[ks_name].tables + keyspace = cluster.metadata.keyspaces[ks_name] + tables = keyspace.tables #check for an existing column family if raw_cf_name not in tables: @@ -117,31 +118,19 @@ def sync_table(model, create_missing_keyspace=True): update_compaction(model) - #get existing index names, skip ones that already exist - with connection_manager() as con: - _, idx_names = con.execute( - "SELECT index_name from system.\"IndexInfo\" WHERE table_name=:table_name", - {'table_name': raw_cf_name}, - ONE - ) - - idx_names = [i[0] for i in idx_names] - idx_names = filter(None, idx_names) + table = cluster.metadata.keyspaces[ks_name].tables[raw_cf_name] indexes = 
[c for n,c in model._columns.items() if c.index] - if indexes: - for column in indexes: - if column.db_index_name in idx_names: continue - qs = ['CREATE INDEX index_{}_{}'.format(raw_cf_name, column.db_field_name)] - qs += ['ON {}'.format(cf_name)] - qs += ['("{}")'.format(column.db_field_name)] - qs = ' '.join(qs) - - try: - execute(qs) - except CQLEngineException: - # index already exists - pass + + for column in indexes: + if table.columns[column.db_field_name].index: + continue + + qs = ['CREATE INDEX index_{}_{}'.format(raw_cf_name, column.db_field_name)] + qs += ['ON {}'.format(cf_name)] + qs += ['("{}")'.format(column.db_field_name)] + qs = ' '.join(qs) + execute_native(qs) def get_create_table(model): cf_name = model.column_family_name() From cf5dbbda848b243c8df2cba0bec8b1dd367728ce Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 17 Jun 2014 09:48:57 -0700 Subject: [PATCH 0703/3726] version bump --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 54d1a4f2a4..a803cc227f 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.13.0 +0.14.0 From c9c8bb4b430f758e820b52bf762dd650ea7fcad3 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 17 Jun 2014 12:27:38 -0700 Subject: [PATCH 0704/3726] breaking everything, more or less --- cqlengine/statements.py | 28 +++++++-------- .../statements/test_assignment_clauses.py | 34 +++++++++---------- .../tests/statements/test_delete_statement.py | 4 +-- .../tests/statements/test_insert_statement.py | 4 +-- .../tests/statements/test_update_statement.py | 4 +-- .../tests/statements/test_where_clause.py | 4 +-- 6 files changed, 39 insertions(+), 39 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 31d1ff088f..8a92fb6697 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -101,7 +101,7 @@ def __init__(self, field, operator, value, quote_field=True): def __unicode__(self): field = 
('"{}"' if self.quote_field else '{}').format(self.field) - return u'{} {} {}'.format(field, self.operator, unicode(self.query_value)) + return u'{} {} ?'.format(field, self.operator) def __hash__(self): return super(WhereClause, self).__hash__() ^ hash(self.operator) @@ -129,7 +129,7 @@ class AssignmentClause(BaseClause): """ a single variable st statement """ def __unicode__(self): - return u'"{}" = :{}'.format(self.field, self.context_id) + return u'"{}" = ?'.format(self.field) def insert_tuple(self): return self.field, self.context_id @@ -170,15 +170,15 @@ def __unicode__(self): qs = [] ctx_id = self.context_id if self.previous is None and not (self._assignments or self._additions or self._removals): - qs += ['"{}" = :{}'.format(self.field, ctx_id)] + qs += ['"{}" = ?'.format(self.field)] if self._assignments: - qs += ['"{}" = :{}'.format(self.field, ctx_id)] + qs += ['"{}" = ?'.format(self.field)] ctx_id += 1 if self._additions: - qs += ['"{0}" = "{0}" + :{1}'.format(self.field, ctx_id)] + qs += ['"{0}" = "{0}" + ?'.format(self.field)] ctx_id += 1 if self._removals: - qs += ['"{0}" = "{0}" - :{1}'.format(self.field, ctx_id)] + qs += ['"{0}" = "{0}" - ?'.format(self.field)] return ', '.join(qs) @@ -232,15 +232,15 @@ def __unicode__(self): qs = [] ctx_id = self.context_id if self._assignments is not None: - qs += ['"{}" = :{}'.format(self.field, ctx_id)] + qs += ['"{}" = ?'.format(self.field)] ctx_id += 1 if self._prepend: - qs += ['"{0}" = :{1} + "{0}"'.format(self.field, ctx_id)] + qs += ['"{0}" = ? 
+ "{0}"'.format(self.field)] ctx_id += 1 if self._append: - qs += ['"{0}" = "{0}" + :{1}'.format(self.field, ctx_id)] + qs += ['"{0}" = "{0}" + ?'.format(self.field)] return ', '.join(qs) @@ -356,10 +356,10 @@ def __unicode__(self): ctx_id = self.context_id if self.previous is None and not self._updates: - qs += ['"int_map" = :1'] + qs += ['"int_map" = ?'] else: for _ in self._updates or []: - qs += ['"{}"[:{}] = :{}'.format(self.field, ctx_id, ctx_id + 1)] + qs += ['"{}"[?] = ?'.format(self.field)] ctx_id += 2 return ', '.join(qs) @@ -380,7 +380,7 @@ def update_context(self, ctx): def __unicode__(self): delta = self.value - self.previous sign = '-' if delta < 0 else '+' - return '"{0}" = "{0}" {1} :{2}'.format(self.field, sign, self.context_id) + return '"{0}" = "{0}" {1} ?'.format(self.field, sign) class BaseDeleteClause(BaseClause): @@ -428,7 +428,7 @@ def get_context_size(self): def __unicode__(self): if not self._analyzed: self._analyze() - return ', '.join(['"{}"[:{}]'.format(self.field, self.context_id + i) for i in range(len(self._removals))]) + return ', '.join(['"{}"[?]'.format(self.field) for i in range(len(self._removals))]) class BaseCQLStatement(object): @@ -626,7 +626,7 @@ def __unicode__(self): qs += ["({})".format(', '.join(['"{}"'.format(c) for c in columns]))] qs += ['VALUES'] - qs += ["({})".format(', '.join([':{}'.format(v) for v in values]))] + qs += ["({})".format(', '.join(['?' 
for v in values]))] if self.ttl: qs += ["USING TTL {}".format(self.ttl)] diff --git a/cqlengine/tests/statements/test_assignment_clauses.py b/cqlengine/tests/statements/test_assignment_clauses.py index 126dd9869a..48f58edc5a 100644 --- a/cqlengine/tests/statements/test_assignment_clauses.py +++ b/cqlengine/tests/statements/test_assignment_clauses.py @@ -25,7 +25,7 @@ def test_update_from_none(self): self.assertIsNone(c._removals) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = :0') + self.assertEqual(str(c), '"s" = ?') ctx = {} c.update_context(ctx) @@ -75,7 +75,7 @@ def test_additions(self): self.assertIsNone(c._removals) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = "s" + :0') + self.assertEqual(str(c), '"s" = "s" + ?') ctx = {} c.update_context(ctx) @@ -91,7 +91,7 @@ def test_removals(self): self.assertEqual(c._removals, {3}) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = "s" - :0') + self.assertEqual(str(c), '"s" = "s" - ?') ctx = {} c.update_context(ctx) @@ -107,7 +107,7 @@ def test_additions_and_removals(self): self.assertEqual(c._removals, {1}) self.assertEqual(c.get_context_size(), 2) - self.assertEqual(str(c), '"s" = "s" + :0, "s" = "s" - :1') + self.assertEqual(str(c), '"s" = "s" + ?, "s" = "s" - ?') ctx = {} c.update_context(ctx) @@ -126,7 +126,7 @@ def test_update_from_none(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = :0') + self.assertEqual(str(c), '"s" = ?') ctx = {} c.update_context(ctx) @@ -142,7 +142,7 @@ def test_update_from_empty(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = :0') + self.assertEqual(str(c), '"s" = ?') ctx = {} c.update_context(ctx) @@ -158,7 +158,7 @@ def test_update_from_different_list(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = :0') + 
self.assertEqual(str(c), '"s" = ?') ctx = {} c.update_context(ctx) @@ -174,7 +174,7 @@ def test_append(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = "s" + :0') + self.assertEqual(str(c), '"s" = "s" + ?') ctx = {} c.update_context(ctx) @@ -190,7 +190,7 @@ def test_prepend(self): self.assertEqual(c._prepend, [1, 2]) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = :0 + "s"') + self.assertEqual(str(c), '"s" = ? + "s"') ctx = {} c.update_context(ctx) @@ -207,7 +207,7 @@ def test_append_and_prepend(self): self.assertEqual(c._prepend, [1, 2]) self.assertEqual(c.get_context_size(), 2) - self.assertEqual(str(c), '"s" = :0 + "s", "s" = "s" + :1') + self.assertEqual(str(c), '"s" = ? + "s", "s" = "s" + ?') ctx = {} c.update_context(ctx) @@ -225,7 +225,7 @@ def test_shrinking_list_update(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = :0') + self.assertEqual(str(c), '"s" = ?') ctx = {} c.update_context(ctx) @@ -241,7 +241,7 @@ def test_update(self): self.assertEqual(c._updates, [3, 5]) self.assertEqual(c.get_context_size(), 4) - self.assertEqual(str(c), '"s"[:0] = :1, "s"[:2] = :3') + self.assertEqual(str(c), '"s"[?] = ?, "s"[?] = ?') ctx = {} c.update_context(ctx) @@ -254,7 +254,7 @@ def test_update_from_null(self): self.assertEqual(c._updates, [3, 5]) self.assertEqual(c.get_context_size(), 4) - self.assertEqual(str(c), '"s"[:0] = :1, "s"[:2] = :3') + self.assertEqual(str(c), '"s"[?] = ?, "s"[?] 
= ?') ctx = {} c.update_context(ctx) @@ -275,7 +275,7 @@ def test_positive_update(self): c.set_context_id(5) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"a" = "a" + :5') + self.assertEqual(str(c), '"a" = "a" + ?') ctx = {} c.update_context(ctx) @@ -286,7 +286,7 @@ def test_negative_update(self): c.set_context_id(3) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"a" = "a" - :3') + self.assertEqual(str(c), '"a" = "a" - ?') ctx = {} c.update_context(ctx) @@ -297,7 +297,7 @@ def noop_update(self): c.set_context_id(5) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"a" = "a" + :5') + self.assertEqual(str(c), '"a" = "a" + ?') ctx = {} c.update_context(ctx) @@ -313,7 +313,7 @@ def test_update(self): self.assertEqual(c._removals, [1, 5]) self.assertEqual(c.get_context_size(), 2) - self.assertEqual(str(c), '"s"[:0], "s"[:1]') + self.assertEqual(str(c), '"s"[?], "s"[?]') ctx = {} c.update_context(ctx) diff --git a/cqlengine/tests/statements/test_delete_statement.py b/cqlengine/tests/statements/test_delete_statement.py index 9ac9554383..ada5b7209f 100644 --- a/cqlengine/tests/statements/test_delete_statement.py +++ b/cqlengine/tests/statements/test_delete_statement.py @@ -31,7 +31,7 @@ def test_table_rendering(self): def test_where_clause_rendering(self): ds = DeleteStatement('table', None) ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) - self.assertEqual(unicode(ds), 'DELETE FROM table WHERE "a" = :0', unicode(ds)) + self.assertEqual(unicode(ds), 'DELETE FROM table WHERE "a" = ?', unicode(ds)) def test_context_update(self): ds = DeleteStatement('table', None) @@ -39,7 +39,7 @@ def test_context_update(self): ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) ds.update_context_id(7) - self.assertEqual(unicode(ds), 'DELETE "d"[:8] FROM table WHERE "a" = :7') + self.assertEqual(unicode(ds), 'DELETE "d"[?] 
FROM table WHERE "a" = ?') self.assertEqual(ds.get_context(), {'7': 'b', '8': 3}) def test_context(self): diff --git a/cqlengine/tests/statements/test_insert_statement.py b/cqlengine/tests/statements/test_insert_statement.py index 5e22d7d99e..eeffe556d9 100644 --- a/cqlengine/tests/statements/test_insert_statement.py +++ b/cqlengine/tests/statements/test_insert_statement.py @@ -17,7 +17,7 @@ def test_statement(self): self.assertEqual( unicode(ist), - 'INSERT INTO table ("a", "c") VALUES (:0, :1)' + 'INSERT INTO table ("a", "c") VALUES (?, ?)' ) def test_context_update(self): @@ -28,7 +28,7 @@ def test_context_update(self): ist.update_context_id(4) self.assertEqual( unicode(ist), - 'INSERT INTO table ("a", "c") VALUES (:4, :5)' + 'INSERT INTO table ("a", "c") VALUES (?, ?)' ) ctx = ist.get_context() self.assertEqual(ctx, {'4': 'b', '5': 'd'}) diff --git a/cqlengine/tests/statements/test_update_statement.py b/cqlengine/tests/statements/test_update_statement.py index 86dbc8be44..9fe32a1aeb 100644 --- a/cqlengine/tests/statements/test_update_statement.py +++ b/cqlengine/tests/statements/test_update_statement.py @@ -16,7 +16,7 @@ def test_rendering(self): us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) - self.assertEqual(unicode(us), 'UPDATE table SET "a" = :0, "c" = :1 WHERE "a" = :2', unicode(us)) + self.assertEqual(unicode(us), 'UPDATE table SET "a" = ?, "c" = ? WHERE "a" = ?', unicode(us)) def test_context(self): us = UpdateStatement('table') @@ -31,7 +31,7 @@ def test_context_update(self): us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) us.update_context_id(3) - self.assertEqual(unicode(us), 'UPDATE table SET "a" = :4, "c" = :5 WHERE "a" = :3') + self.assertEqual(unicode(us), 'UPDATE table SET "a" = ?, "c" = ? 
WHERE "a" = ?') self.assertEqual(us.get_context(), {'4': 'b', '5': 'd', '3': 'x'}) def test_additional_rendering(self): diff --git a/cqlengine/tests/statements/test_where_clause.py b/cqlengine/tests/statements/test_where_clause.py index 938a2b4919..083f154234 100644 --- a/cqlengine/tests/statements/test_where_clause.py +++ b/cqlengine/tests/statements/test_where_clause.py @@ -14,8 +14,8 @@ def test_where_clause_rendering(self): """ tests that where clauses are rendered properly """ wc = WhereClause('a', EqualsOperator(), 'c') wc.set_context_id(5) - self.assertEqual('"a" = :5', unicode(wc)) - self.assertEqual('"a" = :5', str(wc)) + self.assertEqual('"a" = ?', unicode(wc)) + self.assertEqual('"a" = ?', str(wc)) def test_equality_method(self): """ tests that 2 identical where clauses evaluate as == """ From 01bd654516e486491852524ff8dae6ded204d934 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 17 Jun 2014 12:32:32 -0700 Subject: [PATCH 0705/3726] shouldn't be any more old school statements in there anymore --- cqlengine/tests/statements/test_select_statement.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/tests/statements/test_select_statement.py b/cqlengine/tests/statements/test_select_statement.py index f358c6ae5a..7caafbd796 100644 --- a/cqlengine/tests/statements/test_select_statement.py +++ b/cqlengine/tests/statements/test_select_statement.py @@ -30,12 +30,12 @@ def test_table_rendering(self): def test_where_clause_rendering(self): ss = SelectStatement('table') ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) - self.assertEqual(unicode(ss), 'SELECT * FROM table WHERE "a" = :0', unicode(ss)) + self.assertEqual(unicode(ss), 'SELECT * FROM table WHERE "a" = ?', unicode(ss)) def test_count(self): ss = SelectStatement('table', count=True, limit=10, order_by='d') ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) - self.assertEqual(unicode(ss), 'SELECT COUNT(*) FROM table WHERE "a" = :0', unicode(ss)) 
+ self.assertEqual(unicode(ss), 'SELECT COUNT(*) FROM table WHERE "a" = ?', unicode(ss)) self.assertNotIn('LIMIT', unicode(ss)) self.assertNotIn('ORDER', unicode(ss)) @@ -49,11 +49,11 @@ def test_context_id_update(self): ss = SelectStatement('table') ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) self.assertEqual(ss.get_context(), {'0': 'b'}) - self.assertEqual(str(ss), 'SELECT * FROM table WHERE "a" = :0') + self.assertEqual(str(ss), 'SELECT * FROM table WHERE "a" = ?') ss.update_context_id(5) self.assertEqual(ss.get_context(), {'5': 'b'}) - self.assertEqual(str(ss), 'SELECT * FROM table WHERE "a" = :5') + self.assertEqual(str(ss), 'SELECT * FROM table WHERE "a" = ?') def test_additional_rendering(self): ss = SelectStatement( From aba577a2f98489a7da2a8c69d52a1e9054fda605 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 17 Jun 2014 14:11:54 -0700 Subject: [PATCH 0706/3726] updating the compaction options codepath to use native metadata rather than query system tables --- cqlengine/management.py | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 820a70139b..995b27b2c4 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -4,7 +4,7 @@ from cqlengine import ONE from cqlengine.named import NamedTable -from cqlengine.connection import connection_manager, execute, execute_native, get_session, get_cluster +from cqlengine.connection import execute_native, get_cluster from cqlengine.exceptions import CQLEngineException import logging @@ -240,10 +240,13 @@ def get_fields(model): def get_table_settings(model): - return schema_columnfamilies.objects.consistency(ONE).get( - keyspace_name=model._get_keyspace(), - columnfamily_name=model.column_family_name(include_keyspace=False)) + # returns the table as provided by the native driver for a given model + cluster = get_cluster() + ks = model._get_keyspace() + table = 
model.column_family_name(include_keyspace=False) + table = cluster.metadata.keyspaces[ks].tables[table] + return table def update_compaction(model): """Updates the compaction options for the given model if necessary. @@ -255,21 +258,11 @@ def update_compaction(model): :rtype: bool """ logger.debug("Checking %s for compaction differences", model) - row = get_table_settings(model) - # check compaction_strategy_class - if not model.__compaction__: - return - - do_update = not row['compaction_strategy_class'].endswith(model.__compaction__) - - existing_options = json.loads(row['compaction_strategy_options']) - # The min/max thresholds are stored differently in the system data dictionary - existing_options.update({ - 'min_threshold': str(row['min_compaction_threshold']), - 'max_threshold': str(row['max_compaction_threshold']), - }) + table = get_table_settings(model) + existing_options = table.compaction_options.copy() desired_options = get_compaction_options(model) + desired_options.pop('class', None) for k, v in desired_options.items(): From 5c32d6c6aa870031a61fef2a6f16bbf82da2e84e Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 17 Jun 2014 14:15:13 -0700 Subject: [PATCH 0707/3726] removed code for testing connection pool, which is being removed --- cqlengine/tests/management/test_management.py | 71 ++++--------------- 1 file changed, 14 insertions(+), 57 deletions(-) diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 0ba27cd5a1..b578b15897 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -1,54 +1,11 @@ -from mock import MagicMock, patch -from cqlengine import ONE -from cqlengine.exceptions import CQLEngineException -from cqlengine.management import create_table, delete_table, get_fields, sync_table +from cqlengine.management import get_fields, sync_table, drop_table from cqlengine.tests.base import BaseCassEngTestCase -from 
cqlengine.connection import ConnectionPool, Host from cqlengine import management from cqlengine.tests.query.test_queryset import TestModel from cqlengine.models import Model from cqlengine import columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy -class ConnectionPoolFailoverTestCase(BaseCassEngTestCase): - """Test cassandra connection pooling.""" - - def setUp(self): - self.host = Host('127.0.0.1', '9160') - self.pool = ConnectionPool([self.host]) - - def test_totally_dead_pool(self): - # kill the con - with patch('cqlengine.connection.cql.connect') as mock: - mock.side_effect=CQLEngineException - with self.assertRaises(CQLEngineException): - self.pool.execute("select * from system.peers", {}, ONE) - - def test_dead_node(self): - """ - tests that a single dead node doesn't mess up the pool - """ - self.pool._hosts.append(self.host) - - # cursor mock needed so set_cql_version doesn't crap out - ok_cur = MagicMock() - - ok_conn = MagicMock() - ok_conn.return_value = ok_cur - - - returns = [CQLEngineException(), ok_conn] - - def side_effect(*args, **kwargs): - result = returns.pop(0) - if isinstance(result, Exception): - raise result - return result - - with patch('cqlengine.connection.cql.connect') as mock: - mock.side_effect = side_effect - conn = self.pool._create_connection() - class CreateKeyspaceTest(BaseCassEngTestCase): def test_create_succeeeds(self): @@ -61,10 +18,10 @@ def test_multiple_deletes_dont_fail(self): """ """ - create_table(TestModel) + sync_table(TestModel) - delete_table(TestModel) - delete_table(TestModel) + drop_table(TestModel) + drop_table(TestModel) class LowercaseKeyModel(Model): first_key = columns.Integer(primary_key=True) @@ -87,11 +44,11 @@ class CapitalizedKeyTest(BaseCassEngTestCase): def test_table_definition(self): """ Tests that creating a table with capitalized column names succeedso """ - create_table(LowercaseKeyModel) - create_table(CapitalizedKeyModel) + sync_table(LowercaseKeyModel) + 
sync_table(CapitalizedKeyModel) - delete_table(LowercaseKeyModel) - delete_table(CapitalizedKeyModel) + drop_table(LowercaseKeyModel) + drop_table(CapitalizedKeyModel) class FirstModel(Model): @@ -125,25 +82,25 @@ class FourthModel(Model): class AddColumnTest(BaseCassEngTestCase): def setUp(self): - delete_table(FirstModel) + drop_table(FirstModel) def test_add_column(self): - create_table(FirstModel) + sync_table(FirstModel) fields = get_fields(FirstModel) # this should contain the second key self.assertEqual(len(fields), 2) # get schema - create_table(SecondModel) + sync_table(SecondModel) fields = get_fields(FirstModel) self.assertEqual(len(fields), 3) - create_table(ThirdModel) + sync_table(ThirdModel) fields = get_fields(FirstModel) self.assertEqual(len(fields), 4) - create_table(FourthModel) + sync_table(FourthModel) fields = get_fields(FirstModel) self.assertEqual(len(fields), 4) @@ -151,7 +108,7 @@ def test_add_column(self): class SyncTableTests(BaseCassEngTestCase): def setUp(self): - delete_table(PrimaryKeysOnlyModel) + drop_table(PrimaryKeysOnlyModel) def test_sync_table_works_with_primary_keys_only_tables(self): From ac37c33909e2940059f9397e5c013025c5c24cc0 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 12:03:28 -0700 Subject: [PATCH 0708/3726] fixing broken tests --- cqlengine/management.py | 8 +++++--- cqlengine/tests/management/test_compaction_settings.py | 2 +- cqlengine/tests/management/test_management.py | 5 +++-- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 995b27b2c4..5810c12f12 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -244,7 +244,6 @@ def get_table_settings(model): cluster = get_cluster() ks = model._get_keyspace() table = model.column_family_name(include_keyspace=False) - table = cluster.metadata.keyspaces[ks].tables[table] return table @@ -259,12 +258,15 @@ def update_compaction(model): """ logger.debug("Checking %s for 
compaction differences", model) table = get_table_settings(model) - existing_options = table.compaction_options.copy() - desired_options = get_compaction_options(model) + existing_options = table.options.copy() + + desired_options = get_compaction_options(model).get("compaction_strategy_options", {}) desired_options.pop('class', None) + do_update = False + for k, v in desired_options.items(): val = existing_options.pop(k, None) if val != v: diff --git a/cqlengine/tests/management/test_compaction_settings.py b/cqlengine/tests/management/test_compaction_settings.py index e2147e6ccd..aef0d5facf 100644 --- a/cqlengine/tests/management/test_compaction_settings.py +++ b/cqlengine/tests/management/test_compaction_settings.py @@ -136,7 +136,7 @@ def test_alter_actually_alters(self): table_settings = get_table_settings(tmp) - self.assertRegexpMatches(table_settings['compaction_strategy_class'], '.*SizeTieredCompactionStrategy$') + self.assertRegexpMatches(table_settings.options['compaction_strategy_class'], '.*SizeTieredCompactionStrategy$') def test_alter_options(self): diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index b578b15897..67e77a356f 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -122,7 +122,8 @@ def test_sync_table_works_with_primary_keys_only_tables(self): # blows up with DoesNotExist if table does not exist table_settings = management.get_table_settings(PrimaryKeysOnlyModel) # let make sure the flag we care about - assert LeveledCompactionStrategy in table_settings['compaction_strategy_class'] + + assert LeveledCompactionStrategy in table_settings.options['compaction_strategy_class'] # Now we are "updating" the table: @@ -138,4 +139,4 @@ def test_sync_table_works_with_primary_keys_only_tables(self): sync_table(PrimaryKeysOnlyModel) table_settings = management.get_table_settings(PrimaryKeysOnlyModel) - assert 
SizeTieredCompactionStrategy in table_settings['compaction_strategy_class'] + assert SizeTieredCompactionStrategy in table_settings.options['compaction_strategy_class'] From e989bcf42fe161427d6c224b880f86effbd78c33 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 12:15:11 -0700 Subject: [PATCH 0709/3726] fixing broken tests --- cqlengine/tests/management/test_compaction_settings.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cqlengine/tests/management/test_compaction_settings.py b/cqlengine/tests/management/test_compaction_settings.py index aef0d5facf..3da83da7b7 100644 --- a/cqlengine/tests/management/test_compaction_settings.py +++ b/cqlengine/tests/management/test_compaction_settings.py @@ -196,13 +196,15 @@ class AllSizeTieredOptionsModel(Model): drop_table(AllSizeTieredOptionsModel) sync_table(AllSizeTieredOptionsModel) - settings = get_table_settings(AllSizeTieredOptionsModel) - options = json.loads(settings['compaction_strategy_options']) + options = get_table_settings(AllSizeTieredOptionsModel).options['compaction_strategy_options'] + options = json.loads(options) + expected = {u'min_threshold': u'2', u'bucket_low': u'0.3', u'tombstone_compaction_interval': u'86400', u'bucket_high': u'2', u'max_threshold': u'64'} + self.assertDictEqual(options, expected) From f709878577e78739491ce41ea8de98dd417f192b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 12:56:28 -0700 Subject: [PATCH 0710/3726] fixed all management tests --- cqlengine/management.py | 11 ++++++++++- .../tests/management/test_compaction_settings.py | 3 ++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 5810c12f12..b5c6419acb 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -261,12 +261,21 @@ def update_compaction(model): existing_options = table.options.copy() - desired_options = get_compaction_options(model).get("compaction_strategy_options", {}) 
+ existing_compaction_strategy = existing_options['compaction_strategy_class'] + + existing_options = json.loads(existing_options['compaction_strategy_options']) + + desired_options = get_compaction_options(model) + + desired_compact_strategy = desired_options.get('class', SizeTieredCompactionStrategy) desired_options.pop('class', None) do_update = False + if desired_compact_strategy not in existing_compaction_strategy: + do_update = True + for k, v in desired_options.items(): val = existing_options.pop(k, None) if val != v: diff --git a/cqlengine/tests/management/test_compaction_settings.py b/cqlengine/tests/management/test_compaction_settings.py index 3da83da7b7..b34a5cafe0 100644 --- a/cqlengine/tests/management/test_compaction_settings.py +++ b/cqlengine/tests/management/test_compaction_settings.py @@ -220,7 +220,8 @@ class AllLeveledOptionsModel(Model): drop_table(AllLeveledOptionsModel) sync_table(AllLeveledOptionsModel) - settings = get_table_settings(AllLeveledOptionsModel) + settings = get_table_settings(AllLeveledOptionsModel).options + options = json.loads(settings['compaction_strategy_options']) self.assertDictEqual(options, {u'sstable_size_in_mb': u'64'}) From 4cab5851f0a56ca23a1ad7bbd34c8145d2dc37cc Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 13:39:40 -0700 Subject: [PATCH 0711/3726] making sure we're 1.2 compatible --- cqlengine/connection.py | 8 ++++++-- cqlengine/management.py | 2 +- cqlengine/query.py | 4 ++-- cqlengine/tests/statements/test_base_clause.py | 4 ++-- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 2768db1077..5e89c60dc5 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -268,8 +268,12 @@ def connection_manager(): def execute_native(query, params=None, consistency_level=None): # TODO use consistency level - prepared = session.prepare(query) - result = session.execute(prepared, params) + if isinstance(query, 
BaseCQLStatement): + params = query.get_context() + query = str(query) + params = params or {} + + result = session.execute(query, params) return result def get_session(): diff --git a/cqlengine/management.py b/cqlengine/management.py index b5c6419acb..c0562bab89 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -223,7 +223,7 @@ def get_fields(model): ks_name = model._get_keyspace() col_family = model.column_family_name(include_keyspace=False) - query = "select * from system.schema_columns where keyspace_name = ? and columnfamily_name = ?" + query = "select * from system.schema_columns where keyspace_name = %s and columnfamily_name = %s" tmp = execute_native(query, [ks_name, col_family]) # Tables containing only primary keys do not appear to create diff --git a/cqlengine/query.py b/cqlengine/query.py index 383b9c0510..4ea884ee6b 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -4,7 +4,7 @@ from cqlengine import BaseContainerColumn, Map, columns from cqlengine.columns import Counter, List, Set -from cqlengine.connection import execute, RowResult +from cqlengine.connection import execute, RowResult, execute_native from cqlengine.exceptions import CQLEngineException, ValidationError from cqlengine.functions import Token, BaseQueryFunction, QueryValue @@ -775,7 +775,7 @@ def _execute(self, q): if self._batch: return self._batch.add_query(q) else: - return execute(q, consistency_level=self._consistency) + return execute_native(q, consistency_level=self._consistency) def batch(self, batch_obj): if batch_obj is not None and not isinstance(batch_obj, BatchQuery): diff --git a/cqlengine/tests/statements/test_base_clause.py b/cqlengine/tests/statements/test_base_clause.py index c5bbeb402d..4fd6718df7 100644 --- a/cqlengine/tests/statements/test_base_clause.py +++ b/cqlengine/tests/statements/test_base_clause.py @@ -8,9 +8,9 @@ def test_context_updating(self): ss = BaseClause('a', 'b') assert ss.get_context_size() == 1 - ctx = {} + ctx = [] 
ss.set_context_id(10) ss.update_context(ctx) - assert ctx == {'10': 'b'} + assert ctx == ['10': 'b'] From f02692ff27539bc60cab38800f8c05ea62e804fa Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 14:19:28 -0700 Subject: [PATCH 0712/3726] switching from the prepared statements to parameterized statements --- cqlengine/functions.py | 2 +- cqlengine/statements.py | 26 +++++++++---------- .../tests/statements/test_base_clause.py | 2 +- .../tests/statements/test_update_statement.py | 2 +- .../tests/statements/test_where_clause.py | 5 ++-- 5 files changed, 19 insertions(+), 18 deletions(-) diff --git a/cqlengine/functions.py b/cqlengine/functions.py index 5796ba4ae4..d70b129710 100644 --- a/cqlengine/functions.py +++ b/cqlengine/functions.py @@ -9,7 +9,7 @@ class QueryValue(object): be passed into .filter() keyword args """ - format_string = ':{}' + format_string = '%({})s' def __init__(self, value): self.value = value diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 8a92fb6697..dcd9c410a7 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -101,7 +101,7 @@ def __init__(self, field, operator, value, quote_field=True): def __unicode__(self): field = ('"{}"' if self.quote_field else '{}').format(self.field) - return u'{} {} ?'.format(field, self.operator) + return u'{} {} {}'.format(field, self.operator, unicode(self.query_value)) def __hash__(self): return super(WhereClause, self).__hash__() ^ hash(self.operator) @@ -129,7 +129,7 @@ class AssignmentClause(BaseClause): """ a single variable st statement """ def __unicode__(self): - return u'"{}" = ?'.format(self.field) + return u'"{}" = %({})s'.format(self.field, self.context_id) def insert_tuple(self): return self.field, self.context_id @@ -170,15 +170,15 @@ def __unicode__(self): qs = [] ctx_id = self.context_id if self.previous is None and not (self._assignments or self._additions or self._removals): - qs += ['"{}" = ?'.format(self.field)] + qs += ['"{}" = 
%({})s'.format(self.field, ctx_id)] if self._assignments: - qs += ['"{}" = ?'.format(self.field)] + qs += ['"{}" = %({})s'.format(self.field, ctx_id)] ctx_id += 1 if self._additions: - qs += ['"{0}" = "{0}" + ?'.format(self.field)] + qs += ['"{0}" = "{0}" + %({1})s'.format(self.field, ctx_id)] ctx_id += 1 if self._removals: - qs += ['"{0}" = "{0}" - ?'.format(self.field)] + qs += ['"{0}" = "{0}" - %({1})s'.format(self.field, ctx_id)] return ', '.join(qs) @@ -232,15 +232,15 @@ def __unicode__(self): qs = [] ctx_id = self.context_id if self._assignments is not None: - qs += ['"{}" = ?'.format(self.field)] + qs += ['"{}" = %({})s'.format(self.field, ctx_id)] ctx_id += 1 if self._prepend: - qs += ['"{0}" = ? + "{0}"'.format(self.field)] + qs += ['"{0}" = %({1})s + "{0}"'.format(self.field, ctx_id)] ctx_id += 1 if self._append: - qs += ['"{0}" = "{0}" + ?'.format(self.field)] + qs += ['"{0}" = "{0}" + %({})s'.format(self.field, ctx_id)] return ', '.join(qs) @@ -356,10 +356,10 @@ def __unicode__(self): ctx_id = self.context_id if self.previous is None and not self._updates: - qs += ['"int_map" = ?'] + qs += ['"int_map" = %({})s'] else: for _ in self._updates or []: - qs += ['"{}"[?] 
= ?'.format(self.field)] + qs += ['"{}"[%({})s] = %({})s'.format(self.field, ctx_id, ctx_id + 1)] ctx_id += 2 return ', '.join(qs) @@ -380,7 +380,7 @@ def update_context(self, ctx): def __unicode__(self): delta = self.value - self.previous sign = '-' if delta < 0 else '+' - return '"{0}" = "{0}" {1} ?'.format(self.field, sign) + return '"{0}" = "{0}" {1} %({})s'.format(self.field, sign, self.context_id) class BaseDeleteClause(BaseClause): @@ -428,7 +428,7 @@ def get_context_size(self): def __unicode__(self): if not self._analyzed: self._analyze() - return ', '.join(['"{}"[?]'.format(self.field) for i in range(len(self._removals))]) + return ', '.join(['"{}"[%({})s]'.format(self.field) for i in range(len(self._removals))]) class BaseCQLStatement(object): diff --git a/cqlengine/tests/statements/test_base_clause.py b/cqlengine/tests/statements/test_base_clause.py index 4fd6718df7..746cafae31 100644 --- a/cqlengine/tests/statements/test_base_clause.py +++ b/cqlengine/tests/statements/test_base_clause.py @@ -11,6 +11,6 @@ def test_context_updating(self): ctx = [] ss.set_context_id(10) ss.update_context(ctx) - assert ctx == ['10': 'b'] + assert ctx == {'10': 'b'} diff --git a/cqlengine/tests/statements/test_update_statement.py b/cqlengine/tests/statements/test_update_statement.py index 9fe32a1aeb..b98cfae9b0 100644 --- a/cqlengine/tests/statements/test_update_statement.py +++ b/cqlengine/tests/statements/test_update_statement.py @@ -16,7 +16,7 @@ def test_rendering(self): us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) - self.assertEqual(unicode(us), 'UPDATE table SET "a" = ?, "c" = ? 
WHERE "a" = ?', unicode(us)) + self.assertEqual(unicode(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s', unicode(us)) def test_context(self): us = UpdateStatement('table') diff --git a/cqlengine/tests/statements/test_where_clause.py b/cqlengine/tests/statements/test_where_clause.py index 083f154234..be81b63e6c 100644 --- a/cqlengine/tests/statements/test_where_clause.py +++ b/cqlengine/tests/statements/test_where_clause.py @@ -14,8 +14,9 @@ def test_where_clause_rendering(self): """ tests that where clauses are rendered properly """ wc = WhereClause('a', EqualsOperator(), 'c') wc.set_context_id(5) - self.assertEqual('"a" = ?', unicode(wc)) - self.assertEqual('"a" = ?', str(wc)) + + self.assertEqual('"a" = %(5)s', unicode(wc), unicode(wc)) + self.assertEqual('"a" = %(5)s', str(wc), type(wc)) def test_equality_method(self): """ tests that 2 identical where clauses evaluate as == """ From 81081e709ba6a4062602e9734b526bb3b376a626 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 14:24:12 -0700 Subject: [PATCH 0713/3726] fixed select queries, can use params now --- cqlengine/tests/statements/test_select_statement.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/tests/statements/test_select_statement.py b/cqlengine/tests/statements/test_select_statement.py index 7caafbd796..3afe34e2d9 100644 --- a/cqlengine/tests/statements/test_select_statement.py +++ b/cqlengine/tests/statements/test_select_statement.py @@ -30,12 +30,12 @@ def test_table_rendering(self): def test_where_clause_rendering(self): ss = SelectStatement('table') ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) - self.assertEqual(unicode(ss), 'SELECT * FROM table WHERE "a" = ?', unicode(ss)) + self.assertEqual(unicode(ss), 'SELECT * FROM table WHERE "a" = %(0)s', unicode(ss)) def test_count(self): ss = SelectStatement('table', count=True, limit=10, order_by='d') ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) - 
self.assertEqual(unicode(ss), 'SELECT COUNT(*) FROM table WHERE "a" = ?', unicode(ss)) + self.assertEqual(unicode(ss), 'SELECT COUNT(*) FROM table WHERE "a" = %(0)s', unicode(ss)) self.assertNotIn('LIMIT', unicode(ss)) self.assertNotIn('ORDER', unicode(ss)) @@ -49,11 +49,11 @@ def test_context_id_update(self): ss = SelectStatement('table') ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) self.assertEqual(ss.get_context(), {'0': 'b'}) - self.assertEqual(str(ss), 'SELECT * FROM table WHERE "a" = ?') + self.assertEqual(str(ss), 'SELECT * FROM table WHERE "a" = %(0)s') ss.update_context_id(5) self.assertEqual(ss.get_context(), {'5': 'b'}) - self.assertEqual(str(ss), 'SELECT * FROM table WHERE "a" = ?') + self.assertEqual(str(ss), 'SELECT * FROM table WHERE "a" = %(5)s') def test_additional_rendering(self): ss = SelectStatement( From 47018ee65dc4e1c9e8ba5056a8501fce0cbc8749 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 14:29:19 -0700 Subject: [PATCH 0714/3726] fixing tests --- .../statements/test_assignment_clauses.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/cqlengine/tests/statements/test_assignment_clauses.py b/cqlengine/tests/statements/test_assignment_clauses.py index 48f58edc5a..b344404e03 100644 --- a/cqlengine/tests/statements/test_assignment_clauses.py +++ b/cqlengine/tests/statements/test_assignment_clauses.py @@ -25,7 +25,7 @@ def test_update_from_none(self): self.assertIsNone(c._removals) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = ?') + self.assertEqual(str(c), '"s" = %(0)s') ctx = {} c.update_context(ctx) @@ -75,7 +75,7 @@ def test_additions(self): self.assertIsNone(c._removals) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = "s" + ?') + self.assertEqual(str(c), '"s" = "s" + %(0)s') ctx = {} c.update_context(ctx) @@ -91,7 +91,7 @@ def test_removals(self): self.assertEqual(c._removals, {3}) 
self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = "s" - ?') + self.assertEqual(str(c), '"s" = "s" - %(0)s') ctx = {} c.update_context(ctx) @@ -107,7 +107,7 @@ def test_additions_and_removals(self): self.assertEqual(c._removals, {1}) self.assertEqual(c.get_context_size(), 2) - self.assertEqual(str(c), '"s" = "s" + ?, "s" = "s" - ?') + self.assertEqual(str(c), '"s" = "s" + %(0)s, "s" = "s" - %(1)s') ctx = {} c.update_context(ctx) @@ -126,7 +126,7 @@ def test_update_from_none(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = ?') + self.assertEqual(str(c), '"s" = %(0)s') ctx = {} c.update_context(ctx) @@ -142,7 +142,7 @@ def test_update_from_empty(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = ?') + self.assertEqual(str(c), '"s" = %(0)s') ctx = {} c.update_context(ctx) @@ -158,7 +158,7 @@ def test_update_from_different_list(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = ?') + self.assertEqual(str(c), '"s" = %(0)s') ctx = {} c.update_context(ctx) @@ -254,7 +254,7 @@ def test_update_from_null(self): self.assertEqual(c._updates, [3, 5]) self.assertEqual(c.get_context_size(), 4) - self.assertEqual(str(c), '"s"[?] = ?, "s"[?] 
= ?') + self.assertEqual(str(c), '"s"[%(0)s] = %(1)s, "s"[%(2)s] = %(3)s') ctx = {} c.update_context(ctx) @@ -297,7 +297,7 @@ def noop_update(self): c.set_context_id(5) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"a" = "a" + ?') + self.assertEqual(str(c), '"a" = "a" + %(0)s') ctx = {} c.update_context(ctx) From 684a605acb03ba11b04ebdba59b20f3c15d27f68 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 14:33:40 -0700 Subject: [PATCH 0715/3726] fixed tuple assignment issue --- cqlengine/statements.py | 2 +- cqlengine/tests/statements/test_assignment_clauses.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index dcd9c410a7..169c5b2f83 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -240,7 +240,7 @@ def __unicode__(self): ctx_id += 1 if self._append: - qs += ['"{0}" = "{0}" + %({})s'.format(self.field, ctx_id)] + qs += ['"{0}" = "{{0}}" + %({1})s'.format(self.field, ctx_id)] return ', '.join(qs) diff --git a/cqlengine/tests/statements/test_assignment_clauses.py b/cqlengine/tests/statements/test_assignment_clauses.py index b344404e03..72365800df 100644 --- a/cqlengine/tests/statements/test_assignment_clauses.py +++ b/cqlengine/tests/statements/test_assignment_clauses.py @@ -190,7 +190,7 @@ def test_prepend(self): self.assertEqual(c._prepend, [1, 2]) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = ? + "s"') + self.assertEqual(str(c), '"s" = %(0)s + "s"') ctx = {} c.update_context(ctx) @@ -225,7 +225,7 @@ def test_shrinking_list_update(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = ?') + self.assertEqual(str(c), '"s" = %(0)s') ctx = {} c.update_context(ctx) @@ -241,7 +241,7 @@ def test_update(self): self.assertEqual(c._updates, [3, 5]) self.assertEqual(c.get_context_size(), 4) - self.assertEqual(str(c), '"s"[?] = ?, "s"[?] 
= ?') + self.assertEqual(str(c), '"s"[%(0)s] = %(1)s, "s"[%(2)s] = %(3)s') ctx = {} c.update_context(ctx) @@ -313,7 +313,7 @@ def test_update(self): self.assertEqual(c._removals, [1, 5]) self.assertEqual(c.get_context_size(), 2) - self.assertEqual(str(c), '"s"[?], "s"[?]') + self.assertEqual(str(c), '"s"[%(0)s], "s"[%(1)s]') ctx = {} c.update_context(ctx) From 43454da71a8bb71d10f563ea45f9696f7eff9c93 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 14:36:45 -0700 Subject: [PATCH 0716/3726] fixed issue with counter placeholders --- cqlengine/statements.py | 2 +- cqlengine/tests/statements/test_assignment_clauses.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 169c5b2f83..e9d203b6ba 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -380,7 +380,7 @@ def update_context(self, ctx): def __unicode__(self): delta = self.value - self.previous sign = '-' if delta < 0 else '+' - return '"{0}" = "{0}" {1} %({})s'.format(self.field, sign, self.context_id) + return '"{0}" = "{0}" {1} %({2})s'.format(self.field, sign, self.context_id) class BaseDeleteClause(BaseClause): diff --git a/cqlengine/tests/statements/test_assignment_clauses.py b/cqlengine/tests/statements/test_assignment_clauses.py index 72365800df..6c173029dc 100644 --- a/cqlengine/tests/statements/test_assignment_clauses.py +++ b/cqlengine/tests/statements/test_assignment_clauses.py @@ -207,7 +207,7 @@ def test_append_and_prepend(self): self.assertEqual(c._prepend, [1, 2]) self.assertEqual(c.get_context_size(), 2) - self.assertEqual(str(c), '"s" = ? 
+ "s", "s" = "s" + ?') + self.assertEqual(str(c), '"s" = %(0)s + "s", "s" = "s" + %(1)s') ctx = {} c.update_context(ctx) From 84f39e6b4898af11e2c810a2ebbd051af93c3058 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 14:41:39 -0700 Subject: [PATCH 0717/3726] fixing counter statements --- cqlengine/tests/statements/test_assignment_clauses.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/tests/statements/test_assignment_clauses.py b/cqlengine/tests/statements/test_assignment_clauses.py index 6c173029dc..c16d33eb8c 100644 --- a/cqlengine/tests/statements/test_assignment_clauses.py +++ b/cqlengine/tests/statements/test_assignment_clauses.py @@ -275,7 +275,7 @@ def test_positive_update(self): c.set_context_id(5) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"a" = "a" + ?') + self.assertEqual(str(c), '"a" = "a" + %(0)s') ctx = {} c.update_context(ctx) @@ -286,7 +286,7 @@ def test_negative_update(self): c.set_context_id(3) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"a" = "a" - ?') + self.assertEqual(str(c), '"a" = "a" - %(3)s') ctx = {} c.update_context(ctx) From 21d43392b1eea643b5d9290c8f4476f5031a6283 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 14:48:58 -0700 Subject: [PATCH 0718/3726] fixed list append --- cqlengine/statements.py | 2 +- cqlengine/tests/statements/test_assignment_clauses.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index e9d203b6ba..c055f8e452 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -240,7 +240,7 @@ def __unicode__(self): ctx_id += 1 if self._append: - qs += ['"{0}" = "{{0}}" + %({1})s'.format(self.field, ctx_id)] + qs += ['"{0}" = "{0}" + %({1})s'.format(self.field, ctx_id)] return ', '.join(qs) diff --git a/cqlengine/tests/statements/test_assignment_clauses.py b/cqlengine/tests/statements/test_assignment_clauses.py index 
c16d33eb8c..efa7129c56 100644 --- a/cqlengine/tests/statements/test_assignment_clauses.py +++ b/cqlengine/tests/statements/test_assignment_clauses.py @@ -174,7 +174,7 @@ def test_append(self): self.assertIsNone(c._prepend) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"s" = "s" + ?') + self.assertEqual(str(c), '"s" = "s" + %(0)s') ctx = {} c.update_context(ctx) From 0b161a4b9c7843fa25f8e0df9c62215279097873 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 14:51:22 -0700 Subject: [PATCH 0719/3726] fixed counter test --- cqlengine/tests/statements/test_assignment_clauses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/tests/statements/test_assignment_clauses.py b/cqlengine/tests/statements/test_assignment_clauses.py index efa7129c56..32da18b228 100644 --- a/cqlengine/tests/statements/test_assignment_clauses.py +++ b/cqlengine/tests/statements/test_assignment_clauses.py @@ -275,7 +275,7 @@ def test_positive_update(self): c.set_context_id(5) self.assertEqual(c.get_context_size(), 1) - self.assertEqual(str(c), '"a" = "a" + %(0)s') + self.assertEqual(str(c), '"a" = "a" + %(5)s') ctx = {} c.update_context(ctx) From d66ec78756fdd982ab5b6c78b9144ace8e4dbcbc Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 14:52:54 -0700 Subject: [PATCH 0720/3726] fixed string formatting --- cqlengine/statements.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index c055f8e452..7e39442369 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -428,7 +428,7 @@ def get_context_size(self): def __unicode__(self): if not self._analyzed: self._analyze() - return ', '.join(['"{}"[%({})s]'.format(self.field) for i in range(len(self._removals))]) + return ', '.join(['"{}"[%({})s]'.format(self.field, self.context_id + i) for i in range(len(self._removals))]) class BaseCQLStatement(object): From 
52c736b57d2722d1d578086d68fec8ed2bae2cad Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 15:07:58 -0700 Subject: [PATCH 0721/3726] fixed update clause --- cqlengine/tests/statements/test_base_clause.py | 2 +- cqlengine/tests/statements/test_delete_statement.py | 4 ++-- cqlengine/tests/statements/test_update_statement.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/tests/statements/test_base_clause.py b/cqlengine/tests/statements/test_base_clause.py index 746cafae31..c5bbeb402d 100644 --- a/cqlengine/tests/statements/test_base_clause.py +++ b/cqlengine/tests/statements/test_base_clause.py @@ -8,7 +8,7 @@ def test_context_updating(self): ss = BaseClause('a', 'b') assert ss.get_context_size() == 1 - ctx = [] + ctx = {} ss.set_context_id(10) ss.update_context(ctx) assert ctx == {'10': 'b'} diff --git a/cqlengine/tests/statements/test_delete_statement.py b/cqlengine/tests/statements/test_delete_statement.py index ada5b7209f..f3b0df9ffd 100644 --- a/cqlengine/tests/statements/test_delete_statement.py +++ b/cqlengine/tests/statements/test_delete_statement.py @@ -31,7 +31,7 @@ def test_table_rendering(self): def test_where_clause_rendering(self): ds = DeleteStatement('table', None) ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) - self.assertEqual(unicode(ds), 'DELETE FROM table WHERE "a" = ?', unicode(ds)) + self.assertEqual(unicode(ds), 'DELETE FROM table WHERE "a" = %(0)s', unicode(ds)) def test_context_update(self): ds = DeleteStatement('table', None) @@ -39,7 +39,7 @@ def test_context_update(self): ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) ds.update_context_id(7) - self.assertEqual(unicode(ds), 'DELETE "d"[?] 
FROM table WHERE "a" = ?') + self.assertEqual(unicode(ds), 'DELETE "d"[%(8)s] FROM table WHERE "a" = %(7)s') self.assertEqual(ds.get_context(), {'7': 'b', '8': 3}) def test_context(self): diff --git a/cqlengine/tests/statements/test_update_statement.py b/cqlengine/tests/statements/test_update_statement.py index b98cfae9b0..fae69bbd8e 100644 --- a/cqlengine/tests/statements/test_update_statement.py +++ b/cqlengine/tests/statements/test_update_statement.py @@ -31,7 +31,7 @@ def test_context_update(self): us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) us.update_context_id(3) - self.assertEqual(unicode(us), 'UPDATE table SET "a" = ?, "c" = ? WHERE "a" = ?') + self.assertEqual(unicode(us), 'UPDATE table SET "a" = %(4)s, "c" = %(5)s WHERE "a" = %(3)s') self.assertEqual(us.get_context(), {'4': 'b', '5': 'd', '3': 'x'}) def test_additional_rendering(self): From 82ebcac4ddd68b3fb6cad74a51777ba725b0092b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 15:17:17 -0700 Subject: [PATCH 0722/3726] actually executing against native proto now, need to gut out the remaining connection pool calls --- cqlengine/connection.py | 1 - cqlengine/statements.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 5e89c60dc5..b6e5d1f635 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -272,7 +272,6 @@ def execute_native(query, params=None, consistency_level=None): params = query.get_context() query = str(query) params = params or {} - result = session.execute(query, params) return result diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 7e39442369..46b4587590 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -626,7 +626,7 @@ def __unicode__(self): qs += ["({})".format(', '.join(['"{}"'.format(c) for c in columns]))] qs += ['VALUES'] - qs += ["({})".format(', '.join(['?' 
for v in values]))] + qs += ["({})".format(', '.join(['%({})s'.format(v) for v in values]))] if self.ttl: qs += ["USING TTL {}".format(self.ttl)] From 0761453e402c64a867d86832b6f22d7e74dd241c Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 15:29:26 -0700 Subject: [PATCH 0723/3726] trying to rig up the old RowResult --- cqlengine/connection.py | 3 ++- cqlengine/query.py | 7 +++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index b6e5d1f635..1f3be21150 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -273,7 +273,8 @@ def execute_native(query, params=None, consistency_level=None): query = str(query) params = params or {} result = session.execute(query, params) - return result + import ipdb; ipdb.set_trace() + return ([], result) def get_session(): return session diff --git a/cqlengine/query.py b/cqlengine/query.py index 4ea884ee6b..ef92f92369 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -231,7 +231,9 @@ def _execute(self, q): if self._batch: return self._batch.add_query(q) else: - return execute(q, consistency_level=self._consistency) + result = execute_native(q, consistency_level=self._consistency) + import ipdb; ipdb.set_trace() + return result def __unicode__(self): return unicode(self._select_query()) @@ -775,7 +777,8 @@ def _execute(self, q): if self._batch: return self._batch.add_query(q) else: - return execute_native(q, consistency_level=self._consistency) + tmp = execute_native(q, consistency_level=self._consistency) + return tmp def batch(self, batch_obj): if batch_obj is not None and not isinstance(batch_obj, BatchQuery): From 91ef3c32f46554fb33d703e391860577fbe30f70 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 16:28:07 -0700 Subject: [PATCH 0724/3726] fixed TTL queries --- cqlengine/connection.py | 12 +++++++++-- cqlengine/management.py | 6 +++--- cqlengine/query.py | 1 - .../tests/statements/test_insert_statement.py 
| 4 ++-- cqlengine/tests/test_ttl.py | 20 ++++++++++++++----- 5 files changed, 30 insertions(+), 13 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 1f3be21150..c7274e8170 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -24,6 +24,7 @@ from thrift.transport.TTransport import TTransportException from cqlengine.statements import BaseCQLStatement +from cassandra.query import dict_factory LOG = logging.getLogger('cqlengine.cql') @@ -83,6 +84,7 @@ def setup( cluster = Cluster(hosts) session = cluster.connect() + session.row_factory = dict_factory _max_connections = max_connections @@ -273,8 +275,14 @@ def execute_native(query, params=None, consistency_level=None): query = str(query) params = params or {} result = session.execute(query, params) - import ipdb; ipdb.set_trace() - return ([], result) + + if result: + keys = result[0].keys() + else: + keys = [] + + return QueryResult(keys, result) + def get_session(): return session diff --git a/cqlengine/management.py b/cqlengine/management.py index c0562bab89..14a0203fb7 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -229,13 +229,13 @@ def get_fields(model): # Tables containing only primary keys do not appear to create # any entries in system.schema_columns, as only non-primary-key attributes # appear to be inserted into the schema_columns table - if not tmp: + if not tmp[1]: return [] try: - return [Field(x.column_name, x.validator) for x in tmp if x.type == 'regular'] + return [Field(x['column_name'], x['validator']) for x in tmp[1] if x['type'] == 'regular'] except ValueError: - return [Field(x.column_name, x.validator) for x in tmp] + return [Field(x['column_name'], x['validator']) for x in tmp[1]] # convert to Field named tuples diff --git a/cqlengine/query.py b/cqlengine/query.py index ef92f92369..94ae98675b 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -232,7 +232,6 @@ def _execute(self, q): return self._batch.add_query(q) 
else: result = execute_native(q, consistency_level=self._consistency) - import ipdb; ipdb.set_trace() return result def __unicode__(self): diff --git a/cqlengine/tests/statements/test_insert_statement.py b/cqlengine/tests/statements/test_insert_statement.py index eeffe556d9..32bb74350d 100644 --- a/cqlengine/tests/statements/test_insert_statement.py +++ b/cqlengine/tests/statements/test_insert_statement.py @@ -17,7 +17,7 @@ def test_statement(self): self.assertEqual( unicode(ist), - 'INSERT INTO table ("a", "c") VALUES (?, ?)' + 'INSERT INTO table ("a", "c") VALUES (%(0)s, %(1)s)' ) def test_context_update(self): @@ -28,7 +28,7 @@ def test_context_update(self): ist.update_context_id(4) self.assertEqual( unicode(ist), - 'INSERT INTO table ("a", "c") VALUES (?, ?)' + 'INSERT INTO table ("a", "c") VALUES (%(4)s, %(5)s)' ) ctx = ist.get_context() self.assertEqual(ctx, {'4': 'b', '5': 'd'}) diff --git a/cqlengine/tests/test_ttl.py b/cqlengine/tests/test_ttl.py index c08a1a19e0..b55254a5eb 100644 --- a/cqlengine/tests/test_ttl.py +++ b/cqlengine/tests/test_ttl.py @@ -4,7 +4,8 @@ from uuid import uuid4 from cqlengine import columns import mock -from cqlengine.connection import ConnectionPool +from cqlengine.connection import ConnectionPool, get_session + class TestTTLModel(Model): id = columns.UUID(primary_key=True, default=lambda:uuid4()) @@ -39,7 +40,9 @@ class TTLModelTests(BaseTTLTest): def test_ttl_included_on_create(self): """ tests that ttls on models work as expected """ - with mock.patch.object(ConnectionPool, 'execute') as m: + session = get_session() + + with mock.patch.object(session, 'execute') as m: TestTTLModel.ttl(60).create(text="hello blake") query = m.call_args[0][0] @@ -56,8 +59,10 @@ def test_queryset_is_returned_on_class(self): class TTLInstanceUpdateTest(BaseTTLTest): def test_update_includes_ttl(self): + session = get_session() + model = TestTTLModel.create(text="goodbye blake") - with mock.patch.object(ConnectionPool, 'execute') as m: + with 
mock.patch.object(session, 'execute') as m: model.ttl(60).update(text="goodbye forever") query = m.call_args[0][0] @@ -84,22 +89,27 @@ def test_instance_is_returned(self): self.assertEqual(60, o._ttl) def test_ttl_is_include_with_query_on_update(self): + session = get_session() + o = TestTTLModel.create(text="whatever") o.text = "new stuff" o = o.ttl(60) - with mock.patch.object(ConnectionPool, 'execute') as m: + with mock.patch.object(session, 'execute') as m: o.save() + query = m.call_args[0][0] self.assertIn("USING TTL", query) class TTLBlindUpdateTest(BaseTTLTest): def test_ttl_included_with_blind_update(self): + session = get_session() + o = TestTTLModel.create(text="whatever") tid = o.id - with mock.patch.object(ConnectionPool, 'execute') as m: + with mock.patch.object(session, 'execute') as m: TestTTLModel.objects(id=tid).ttl(60).update(text="bacon") query = m.call_args[0][0] From fd236ff5d6df040dade65c2829d11b7af6511c51 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 17:02:21 -0700 Subject: [PATCH 0725/3726] trying to properly create objects again --- cqlengine/connection.py | 13 +++++++------ cqlengine/management.py | 4 ++-- cqlengine/tests/model/test_model_io.py | 1 + 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index c7274e8170..732fc0bd63 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -84,7 +84,7 @@ def setup( cluster = Cluster(hosts) session = cluster.connect() - session.row_factory = dict_factory + #session.row_factory = dict_factory _max_connections = max_connections @@ -262,10 +262,11 @@ def execute(query, params=None, consistency_level=None): @contextmanager def connection_manager(): """ :rtype: ConnectionPool """ - global connection_pool - # tmp = connection_pool.get() - yield connection_pool - # connection_pool.put(tmp) + raise Exception("deprecated") + # global connection_pool + # # tmp = connection_pool.get() + # yield connection_pool + # 
# connection_pool.put(tmp) def execute_native(query, params=None, consistency_level=None): @@ -277,7 +278,7 @@ def execute_native(query, params=None, consistency_level=None): result = session.execute(query, params) if result: - keys = result[0].keys() + keys = [x for x in result[0]._fields] else: keys = [] diff --git a/cqlengine/management.py b/cqlengine/management.py index 14a0203fb7..142ea7a7bb 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -233,9 +233,9 @@ def get_fields(model): return [] try: - return [Field(x['column_name'], x['validator']) for x in tmp[1] if x['type'] == 'regular'] + return [Field(x.column_name, x.validator) for x in tmp[1] if x.type == 'regular'] except ValueError: - return [Field(x['column_name'], x['validator']) for x in tmp[1]] + return [Field(x.column_name, xvalidator) for x in tmp[1]] # convert to Field named tuples diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index e20e588571..8a6306249c 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -98,6 +98,7 @@ def test_column_deleting_works_properly(self): tm.save() tm2 = TestModel.objects(id=tm.pk).first() + assert tm2.text is None assert tm2._values['text'].previous_value is None From c359ac918115e636ec9355e19cd6ac55a1964ade Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 17:46:57 -0700 Subject: [PATCH 0726/3726] additional checks for basic io, trying to track down why Rows aren't becoming models --- cqlengine/tests/model/test_model_io.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index 8a6306249c..7c26bae4a3 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -39,7 +39,10 @@ def test_model_save_and_load(self): Tests that models can be saved and retrieved """ tm = TestModel.create(count=8, 
text='123456789') + self.assertIsInstance(tm, TestModel) + tm2 = TestModel.objects(id=tm.pk).first() + self.assertIsInstance(tm2, TestModel) for cname in tm._columns.keys(): self.assertEquals(getattr(tm, cname), getattr(tm2, cname)) @@ -98,6 +101,7 @@ def test_column_deleting_works_properly(self): tm.save() tm2 = TestModel.objects(id=tm.pk).first() + self.assertIsInstance(tm2, TestModel) assert tm2.text is None assert tm2._values['text'].previous_value is None @@ -286,7 +290,9 @@ def tearDownClass(cls): def test_query_with_date(self): uid = uuid4() day = date(2013, 11, 26) - TestQueryModel.create(test_id=uid, date=day, description=u'foo') + obj = TestQueryModel.create(test_id=uid, date=day, description=u'foo') + + self.assertEqual(obj.description, u'foo') inst = TestQueryModel.filter( TestQueryModel.test_id == uid, From 421119bb69463ba214cb5435f7c98c494d7cb381 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 18:43:15 -0700 Subject: [PATCH 0727/3726] fixed the model construction, cache is broken though --- cqlengine/connection.py | 13 ++++++------- cqlengine/management.py | 6 ++---- cqlengine/models.py | 11 +++++++++-- cqlengine/query.py | 13 +++++++------ 4 files changed, 24 insertions(+), 19 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 732fc0bd63..9983aeac57 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -84,7 +84,8 @@ def setup( cluster = Cluster(hosts) session = cluster.connect() - #session.row_factory = dict_factory + session.row_factory = dict_factory + return _max_connections = max_connections @@ -249,6 +250,8 @@ def execute(self, query, params, consistency_level=None): def execute(query, params=None, consistency_level=None): + raise Exception("shut up") + if isinstance(query, BaseCQLStatement): params = query.get_context() query = str(query) @@ -274,15 +277,11 @@ def execute_native(query, params=None, consistency_level=None): if isinstance(query, BaseCQLStatement): params = 
query.get_context() query = str(query) + params = params or {} result = session.execute(query, params) - if result: - keys = [x for x in result[0]._fields] - else: - keys = [] - - return QueryResult(keys, result) + return result def get_session(): diff --git a/cqlengine/management.py b/cqlengine/management.py index 142ea7a7bb..a0716002da 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -229,13 +229,11 @@ def get_fields(model): # Tables containing only primary keys do not appear to create # any entries in system.schema_columns, as only non-primary-key attributes # appear to be inserted into the schema_columns table - if not tmp[1]: - return [] try: - return [Field(x.column_name, x.validator) for x in tmp[1] if x.type == 'regular'] + return [Field(x['column_name'], x['validator']) for x in tmp if x['type'] == 'regular'] except ValueError: - return [Field(x.column_name, xvalidator) for x in tmp[1]] + return [Field(x['column_name'], x['validator']) for x in tmp] # convert to Field named tuples diff --git a/cqlengine/models.py b/cqlengine/models.py index 7791da0ec8..028ab658e6 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -307,12 +307,17 @@ def _get_model_by_polymorphic_key(cls, key): return cls._polymorphic_map.get(key) @classmethod - def _construct_instance(cls, names, values): + def _construct_instance(cls, values): """ method used to construct instances from query results this is where polymorphic deserialization occurs """ - field_dict = dict((cls._db_map.get(k, k), v) for k, v in zip(names, values)) + # we're going to take the values, which is from the DB as a dict + # and translate that into our local fields + # the db_map is a db_field -> model field map + items = values.items() + field_dict = dict([(cls._db_map.get(k, k),v) for k,v in items]) + if cls._is_polymorphic: poly_key = field_dict.get(cls._polymorphic_column_name) @@ -703,6 +708,8 @@ def _get_polymorphic_base(bases): attrs['_columns'] = column_dict 
attrs['_primary_keys'] = primary_keys attrs['_defined_columns'] = defined_columns + + # maps the database field to the models key attrs['_db_map'] = db_map attrs['_pk_name'] = pk_name attrs['_dynamic_columns'] = {} diff --git a/cqlengine/query.py b/cqlengine/query.py index 94ae98675b..87a7d17624 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -6,6 +6,7 @@ from cqlengine.connection import execute, RowResult, execute_native + from cqlengine.exceptions import CQLEngineException, ValidationError from cqlengine.functions import Token, BaseQueryFunction, QueryValue @@ -293,8 +294,8 @@ def _execute_query(self): if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") if self._result_cache is None: - columns, self._result_cache = self._execute(self._select_query()) - self._construct_result = self._get_result_constructor(columns) + self._result_cache = self._execute(self._select_query()) + self._construct_result = self._get_result_constructor() def _fill_result_cache_to_idx(self, idx): self._execute_query() @@ -319,7 +320,7 @@ def __iter__(self): for idx in range(len(self._result_cache)): instance = self._result_cache[idx] - if isinstance(instance, RowResult): + if isinstance(instance, dict): self._fill_result_cache_to_idx(idx) yield self._result_cache[idx] @@ -350,7 +351,7 @@ def __getitem__(self, s): self._fill_result_cache_to_idx(s) return self._result_cache[s] - def _get_result_constructor(self, names): + def _get_result_constructor(self): """ Returns a function that will be used to instantiate query results """ @@ -650,10 +651,10 @@ def _select_fields(self): return [self.model._columns[f].db_field_name for f in fields] return super(ModelQuerySet, self)._select_fields() - def _get_result_constructor(self, names): + def _get_result_constructor(self): """ Returns a function that will be used to instantiate query results """ if not self._values_list: - return lambda values: self.model._construct_instance(names, 
values) + return lambda rows: self.model._construct_instance(rows) else: columns = [self.model._columns[n] for n in names] if self._flat_values_list: From 7d58249b69082948cc36ad7392dc42a57c80a0f9 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 18 Jun 2014 19:09:02 -0700 Subject: [PATCH 0728/3726] starting on fixing value lists --- cqlengine/query.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 87a7d17624..a1caba9de1 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -653,14 +653,19 @@ def _select_fields(self): def _get_result_constructor(self): """ Returns a function that will be used to instantiate query results """ - if not self._values_list: + if not self._values_list: # we want models return lambda rows: self.model._construct_instance(rows) + elif self._flat_values_list: # the user has requested flattened tuples + return lambda rows: self._get_tuple_list_flat(rows) else: - columns = [self.model._columns[n] for n in names] - if self._flat_values_list: - return lambda values: columns[0].to_python(values[0]) - else: - return lambda values: map(lambda (c, v): c.to_python(v), zip(columns, values)) + return lambda rows: self._get_tuple_list_nested(rows) + + + def _get_tuple_list_nested(self, rows): + return map(lambda (c, v): c.to_python(v), zip(columns, rows)) + + def _get_tuple_list_flat(self, rows): + return columns[0].to_python(rows[0]) def _get_ordering_condition(self, colname): colname, order_type = super(ModelQuerySet, self)._get_ordering_condition(colname) From 997c5a85c5bc7cf64f73fa2278d16a6bc8113370 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Thu, 19 Jun 2014 11:59:01 +0200 Subject: [PATCH 0729/3726] Dumb down table properties unit tests. 
--- cqlengine/tests/management/test_management.py | 95 +++++++++++++------ 1 file changed, 67 insertions(+), 28 deletions(-) diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index f0d7c6e909..d09c9e5543 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -152,39 +152,78 @@ def test_add_column(self): self.assertEqual(len(fields), 4) -class TablePropertiesTests(BaseCassEngTestCase): +class ModelWithTableProperties(Model): + # Set random table properties + __bloom_filter_fp_chance__ = 0.76328 + __caching__ = ALL + __comment__ = 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR' + __default_time_to_live__ = 4756 + __gc_grace_seconds__ = 2063 + __index_interval__ = 98706 + __memtable_flush_period_in_ms__ = 43681 + __populate_io_cache_on_flush__ = True + __read_repair_chance__ = 0.17985 + __replicate_on_write__ = False + __dclocal_read_repair_chance__ = 0.50811 + + key = columns.UUID(primary_key=True) + - table_properties = { - 'bloom_filter_fp_chance': random.randint(0, 99999) / 100000.0, - 'caching': random.choice([ALL, KEYS_ONLY, ROWS_ONLY, NONE]), - 'comment': ''.join(random.choice(string.ascii_letters) - for i in range(random.randint(4, 99))), - 'dclocal_read_repair_chance': random.randint(0, 99999) / 100000.0, - 'default_time_to_live': random.randint(0, 99999), - 'gc_grace_seconds': random.randint(0, 99999), - 'index_interval': random.randint(0, 99999), - 'memtable_flush_period_in_ms': random.randint(0, 99999), - 'populate_io_cache_on_flush': random.choice([True, False]), - 'read_repair_chance': random.randint(0, 99999) / 100000.0, - 'replicate_on_write': random.choice([True, False]), - } +class TablePropertiesTests(BaseCassEngTestCase): def setUp(self): - delete_table(FirstModel) + delete_table(ModelWithTableProperties) def test_set_table_properties(self): - for property_name, property_value in self.table_properties.items(): - property_id = 
'__{}__'.format(property_name) - setattr(FirstModel, property_id, property_value) - sync_table(FirstModel) - table_settings = management.get_table_settings(FirstModel) - for property_name, property_value in self.table_properties.items(): - if property_name == 'dclocal_read_repair_chance': - # For some reason 'dclocal_read_repair_chance' in CQL is called - # just 'local_read_repair_chance' in the schema table. - # Source: https://issues.apache.org/jira/browse/CASSANDRA-6717 - property_name = 'local_read_repair_chance' - assert table_settings[property_name] == property_value + create_table(ModelWithTableProperties) + self.assertDictContainsSubset({ + 'bloom_filter_fp_chance': 0.76328, + 'caching': ALL, + 'comment': 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR', + 'default_time_to_live': 4756, + 'gc_grace_seconds': 2063, + 'index_interval': 98706, + 'memtable_flush_period_in_ms': 43681, + 'populate_io_cache_on_flush': True, + 'read_repair_chance': 0.17985, + 'replicate_on_write': False, + # For some reason 'dclocal_read_repair_chance' in CQL is called + # just 'local_read_repair_chance' in the schema table. 
+ # Source: https://issues.apache.org/jira/browse/CASSANDRA-6717 + 'local_read_repair_chance': 0.50811, + }, management.get_table_settings(ModelWithTableProperties)) + + def test_table_property_update(self): + # Create vanilla table + #sync_table(ModelWithTableProperties) + + ModelWithTableProperties.__bloom_filter_fp_chance__ = 0.66778 + ModelWithTableProperties.__caching__ = NONE + ModelWithTableProperties.__comment__ = 'xirAkRWZVVvsmzRvXamiEcQkshkUIDINVJZgLYSdnGHweiBrAiJdLJkVohdRy' + ModelWithTableProperties.__default_time_to_live__ = 65178 + ModelWithTableProperties.__gc_grace_seconds__ = 96362 + ModelWithTableProperties.__index_interval__ = 94207 + ModelWithTableProperties.__memtable_flush_period_in_ms__ = 60210 + ModelWithTableProperties.__populate_io_cache_on_flush__ = False + ModelWithTableProperties.__read_repair_chance__ = 0.2989 + ModelWithTableProperties.__replicate_on_write__ = True + ModelWithTableProperties.__dclocal_read_repair_chance__ = 0.12732 + + sync_table(ModelWithTableProperties) + + self.assertDictContainsSubset({ + 'bloom_filter_fp_chance': 0.66778, + 'caching': NONE, + 'comment': 'xirAkRWZVVvsmzRvXamiEcQkshkUIDINVJZgLYSdnGHweiBrAiJdLJkVohdRy', + 'default_time_to_live': 65178, + 'gc_grace_seconds': 96362, + 'index_interval': 94207, + 'memtable_flush_period_in_ms': 60210, + 'populate_io_cache_on_flush': False, + 'read_repair_chance': 0.2989, + 'replicate_on_write': True, + 'local_read_repair_chance': 0.12732, + }, management.get_table_settings(ModelWithTableProperties)) class SyncTableTests(BaseCassEngTestCase): From 3c56fec1d8d79d6037d3b83703608d4889994012 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Thu, 19 Jun 2014 12:01:29 +0200 Subject: [PATCH 0730/3726] Remove dead code and unused imports. 
--- cqlengine/tests/management/test_management.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index d09c9e5543..49ede1142b 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -1,9 +1,6 @@ -import random -import string - from mock import MagicMock, patch -from cqlengine import ONE, ALL, KEYS_ONLY, ROWS_ONLY, NONE +from cqlengine import ONE, ALL, NONE from cqlengine.exceptions import CQLEngineException from cqlengine.management import create_table, delete_table, get_fields, sync_table from cqlengine.tests.base import BaseCassEngTestCase @@ -194,9 +191,6 @@ def test_set_table_properties(self): }, management.get_table_settings(ModelWithTableProperties)) def test_table_property_update(self): - # Create vanilla table - #sync_table(ModelWithTableProperties) - ModelWithTableProperties.__bloom_filter_fp_chance__ = 0.66778 ModelWithTableProperties.__caching__ = NONE ModelWithTableProperties.__comment__ = 'xirAkRWZVVvsmzRvXamiEcQkshkUIDINVJZgLYSdnGHweiBrAiJdLJkVohdRy' From c80d463b8d4f19909e022233fb3412a3b5c21244 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 09:12:49 -0700 Subject: [PATCH 0731/3726] note in test so I know what exactly is supposed to come back --- cqlengine/tests/model/test_clustering_order.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cqlengine/tests/model/test_clustering_order.py b/cqlengine/tests/model/test_clustering_order.py index 92d8e067e8..528ea2e01b 100644 --- a/cqlengine/tests/model/test_clustering_order.py +++ b/cqlengine/tests/model/test_clustering_order.py @@ -32,4 +32,5 @@ def test_clustering_order(self): TestModel.create(id=1, clustering_key=i) values = list(TestModel.objects.values_list('clustering_key', flat=True)) + # [19L, 18L, 17L, 16L, 15L, 14L, 13L, 12L, 11L, 10L, 9L, 8L, 7L, 6L, 5L, 4L, 3L, 2L, 1L, 0L] self.assertEquals(values, 
sorted(items, reverse=True)) From 3d7d97528ae9087269defa340cee902d17481caf Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 09:16:05 -0700 Subject: [PATCH 0732/3726] fixed docs around values lists while i'm in here --- docs/topics/queryset.rst | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index a8eb2af576..ddb2437152 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -268,6 +268,17 @@ Values Lists There is a special QuerySet's method ``.values_list()`` - when called, QuerySet returns lists of values instead of model instances. It may significantly speedup things with lower memory footprint for large responses. Each tuple contains the value from the respective field passed into the ``values_list()`` call — so the first item is the first field, etc. For example: + .. code-block:: python + + items = list(range(20)) + random.shuffle(items) + for i in items: + TestModel.create(id=1, clustering_key=i) + + values = list(TestModel.objects.values_list('clustering_key', flat=True)) + # [19L, 18L, 17L, 16L, 15L, 14L, 13L, 12L, 11L, 10L, 9L, 8L, 7L, 6L, 5L, 4L, 3L, 2L, 1L, 0L] + + Batch Queries ============= From ed2749f235655c8d8d66776671c37492f2e2bfb7 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 09:22:28 -0700 Subject: [PATCH 0733/3726] fixed docs error --- docs/topics/queryset.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index ddb2437152..a8dca85356 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -269,7 +269,7 @@ Values Lists Each tuple contains the value from the respective field passed into the ``values_list()`` call — so the first item is the first field, etc. For example: .. 
code-block:: python - + items = list(range(20)) random.shuffle(items) for i in items: From 5f2dce51a5269ba368f5ac71c366c22ea1fea27e Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 09:30:10 -0700 Subject: [PATCH 0734/3726] test around clustering order --- .../tests/model/test_clustering_order.py | 25 +++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/cqlengine/tests/model/test_clustering_order.py b/cqlengine/tests/model/test_clustering_order.py index 92d8e067e8..98e1277b87 100644 --- a/cqlengine/tests/model/test_clustering_order.py +++ b/cqlengine/tests/model/test_clustering_order.py @@ -1,7 +1,7 @@ import random from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.management import create_table +from cqlengine.management import sync_table from cqlengine.management import delete_table from cqlengine.models import Model from cqlengine import columns @@ -10,12 +10,17 @@ class TestModel(Model): id = columns.Integer(primary_key=True) clustering_key = columns.Integer(primary_key=True, clustering_order='desc') +class TestClusteringComplexModel(Model): + id = columns.Integer(primary_key=True) + clustering_key = columns.Integer(primary_key=True, clustering_order='desc') + some_value = columns.Integer() + class TestClusteringOrder(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(TestClusteringOrder, cls).setUpClass() - create_table(TestModel) + sync_table(TestModel) @classmethod def tearDownClass(cls): @@ -33,3 +38,19 @@ def test_clustering_order(self): values = list(TestModel.objects.values_list('clustering_key', flat=True)) self.assertEquals(values, sorted(items, reverse=True)) + + def test_clustering_order_more_complex(self): + """ + Tests that models can be saved and retrieved + """ + sync_table(TestClusteringComplexModel) + + items = list(range(20)) + random.shuffle(items) + for i in items: + TestClusteringComplexModel.create(id=1, clustering_key=i, some_value=2) + + values = 
list(TestClusteringComplexModel.objects.values_list('some_value', flat=True)) + + self.assertEquals([2] * 20, values) + delete_table(TestClusteringComplexModel) From 4a2d3b9f9ce6b2eaf311daffd47f6c3e92f3ea44 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 09:35:42 -0700 Subject: [PATCH 0735/3726] fixed flat query --- cqlengine/query.py | 6 +----- cqlengine/tests/model/test_clustering_order.py | 6 +++--- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index a1caba9de1..92f0823284 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -656,17 +656,13 @@ def _get_result_constructor(self): if not self._values_list: # we want models return lambda rows: self.model._construct_instance(rows) elif self._flat_values_list: # the user has requested flattened tuples - return lambda rows: self._get_tuple_list_flat(rows) + return lambda row: row.popitem()[1] else: return lambda rows: self._get_tuple_list_nested(rows) - def _get_tuple_list_nested(self, rows): return map(lambda (c, v): c.to_python(v), zip(columns, rows)) - def _get_tuple_list_flat(self, rows): - return columns[0].to_python(rows[0]) - def _get_ordering_condition(self, colname): colname, order_type = super(ModelQuerySet, self)._get_ordering_condition(colname) diff --git a/cqlengine/tests/model/test_clustering_order.py b/cqlengine/tests/model/test_clustering_order.py index d523daf8c6..6c94a7f6c9 100644 --- a/cqlengine/tests/model/test_clustering_order.py +++ b/cqlengine/tests/model/test_clustering_order.py @@ -2,7 +2,7 @@ from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.management import sync_table -from cqlengine.management import delete_table +from cqlengine.management import drop_table from cqlengine.models import Model from cqlengine import columns @@ -25,7 +25,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): super(TestClusteringOrder, cls).tearDownClass() - delete_table(TestModel) + drop_table(TestModel) 
def test_clustering_order(self): """ @@ -54,4 +54,4 @@ def test_clustering_order_more_complex(self): values = list(TestClusteringComplexModel.objects.values_list('some_value', flat=True)) self.assertEquals([2] * 20, values) - delete_table(TestClusteringComplexModel) + drop_table(TestClusteringComplexModel) From 879339c01600fbacb7632705cd97c9835d51d2e3 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 09:38:21 -0700 Subject: [PATCH 0736/3726] fixing value lists --- cqlengine/query.py | 2 +- .../model/{test_clustering_order.py => test_value_lists.py} | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) rename cqlengine/tests/model/{test_clustering_order.py => test_value_lists.py} (99%) diff --git a/cqlengine/query.py b/cqlengine/query.py index 92f0823284..95f6529706 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -655,7 +655,7 @@ def _get_result_constructor(self): """ Returns a function that will be used to instantiate query results """ if not self._values_list: # we want models return lambda rows: self.model._construct_instance(rows) - elif self._flat_values_list: # the user has requested flattened tuples + elif self._flat_values_list: # the user has requested flattened list (1 value per row) return lambda row: row.popitem()[1] else: return lambda rows: self._get_tuple_list_nested(rows) diff --git a/cqlengine/tests/model/test_clustering_order.py b/cqlengine/tests/model/test_value_lists.py similarity index 99% rename from cqlengine/tests/model/test_clustering_order.py rename to cqlengine/tests/model/test_value_lists.py index 6c94a7f6c9..8d4c76de5c 100644 --- a/cqlengine/tests/model/test_clustering_order.py +++ b/cqlengine/tests/model/test_value_lists.py @@ -6,6 +6,7 @@ from cqlengine.models import Model from cqlengine import columns + class TestModel(Model): id = columns.Integer(primary_key=True) clustering_key = columns.Integer(primary_key=True, clustering_order='desc') @@ -55,3 +56,4 @@ def test_clustering_order_more_complex(self): 
self.assertEquals([2] * 20, values) drop_table(TestClusteringComplexModel) + From eed5d3ee7d2b9e31757f89a9ae6cbdde7611aa27 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 09:51:43 -0700 Subject: [PATCH 0737/3726] now calling native execute in batch --- cqlengine/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 95f6529706..71f3345d54 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -174,7 +174,7 @@ def execute(self): query_list.append('APPLY BATCH;') - execute('\n'.join(query_list), parameters, self._consistency) + execute_native('\n'.join(query_list), parameters, self._consistency) self.queries = [] self._execute_callbacks() From f9baed3d99064b748c54c88bd2779fe117367408 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 09:53:36 -0700 Subject: [PATCH 0738/3726] gutting the pool out --- cqlengine/connection.py | 27 --------------------------- cqlengine/tests/test_batch_query.py | 1 - 2 files changed, 28 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 9983aeac57..90a54a975c 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -85,34 +85,7 @@ def setup( cluster = Cluster(hosts) session = cluster.connect() session.row_factory = dict_factory - return - _max_connections = max_connections - - if default_keyspace: - from cqlengine import models - models.DEFAULT_KEYSPACE = default_keyspace - - _hosts = [] - for host in hosts: - host = host.strip() - host = host.split(':') - if len(host) == 1: - port = 9160 - elif len(host) == 2: - try: - port = int(host[1]) - except ValueError: - raise CQLConnectionError("Can't parse port as int {}".format(':'.join(host))) - else: - raise CQLConnectionError("Can't parse host string {}".format(':'.join(host))) - - _hosts.append(Host(host[0], port)) - - if not _hosts: - raise CQLConnectionError("At least one host required") - - connection_pool = ConnectionPool(_hosts, username, 
password, consistency, timeout) class ConnectionPool(object): diff --git a/cqlengine/tests/test_batch_query.py b/cqlengine/tests/test_batch_query.py index 7066643d39..9c2d4140c8 100644 --- a/cqlengine/tests/test_batch_query.py +++ b/cqlengine/tests/test_batch_query.py @@ -1,7 +1,6 @@ from unittest import skip from uuid import uuid4 import random -from cqlengine.connection import ConnectionPool import mock import sure From fdf142b19f7069a69cedd230e211aab907028969 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 09:56:50 -0700 Subject: [PATCH 0739/3726] count fixed, mock switched from connection pool to session --- cqlengine/query.py | 4 ++-- cqlengine/tests/model/test_polymorphism.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 71f3345d54..3592400228 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -507,8 +507,8 @@ def count(self): if self._result_cache is None: query = self._select_query() query.count = True - _, result = self._execute(query) - return result[0][0] + result = self._execute(query) + return result[0]['count'] else: return len(self._result_cache) diff --git a/cqlengine/tests/model/test_polymorphism.py b/cqlengine/tests/model/test_polymorphism.py index f950bbcdf8..197fcf712a 100644 --- a/cqlengine/tests/model/test_polymorphism.py +++ b/cqlengine/tests/model/test_polymorphism.py @@ -3,7 +3,7 @@ from cqlengine import columns from cqlengine import models -from cqlengine.connection import ConnectionPool +from cqlengine.connection import ConnectionPool, get_session from cqlengine.tests.base import BaseCassEngTestCase from cqlengine import management @@ -122,8 +122,8 @@ def test_query_deserialization(self): def test_delete_on_polymorphic_subclass_does_not_include_polymorphic_key(self): p1 = Poly1.create() - - with mock.patch.object(ConnectionPool, 'execute') as m: + session = get_session() + with mock.patch.object(session, 'execute') as m: 
Poly1.objects(partition=p1.partition).delete() # make sure our polymorphic key isn't in the CQL From 794e8dfd8450759be0e6ef7e6839414ec1f68025 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 10:11:45 -0700 Subject: [PATCH 0740/3726] fixing empty map issue --- cqlengine/statements.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index ce5d5fc47c..ac4f281d6f 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -355,7 +355,7 @@ def __unicode__(self): ctx_id = self.context_id if self.previous is None and not self._updates: - qs += ['"int_map" = %({})s'] + qs += ['"int_map" = %({})s'.format(ctx_id)] else: for _ in self._updates or []: qs += ['"{}"[%({})s] = %({})s'.format(self.field, ctx_id, ctx_id + 1)] From 4eeed3c2dc55bfa5e4d71b4a817e1b36bb911e79 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 10:30:23 -0700 Subject: [PATCH 0741/3726] fix for values list --- cqlengine/query.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 3592400228..e0219db9a2 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -658,10 +658,13 @@ def _get_result_constructor(self): elif self._flat_values_list: # the user has requested flattened list (1 value per row) return lambda row: row.popitem()[1] else: - return lambda rows: self._get_tuple_list_nested(rows) + return lambda row: self._get_row_value_list(self._only_fields, row) - def _get_tuple_list_nested(self, rows): - return map(lambda (c, v): c.to_python(v), zip(columns, rows)) + def _get_row_value_list(self, fields, row): + result = [] + for x in fields: + result.append(row[x]) + return result def _get_ordering_condition(self, colname): colname, order_type = super(ModelQuerySet, self)._get_ordering_condition(colname) From 1baa2b7faa20b806f773106c52a05465a53301dc Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 10:37:57 -0700 
Subject: [PATCH 0742/3726] fixed a few fails --- cqlengine/tests/base.py | 6 ++++++ cqlengine/tests/test_consistency.py | 14 +++++++------- cqlengine/tests/test_timestamp.py | 14 +++++++------- 3 files changed, 20 insertions(+), 14 deletions(-) diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index 35b7cca97a..c8679c1b21 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -1,6 +1,7 @@ from unittest import TestCase from cqlengine import connection import os +from cqlengine.connection import get_session if os.environ.get('CASSANDRA_TEST_HOST'): @@ -15,6 +16,11 @@ class BaseCassEngTestCase(TestCase): # @classmethod # def setUpClass(cls): # super(BaseCassEngTestCase, cls).setUpClass() + session = None + + def setUp(self): + self.session = get_session() + super(BaseCassEngTestCase, self).setUp() def assertHasAttr(self, obj, attr): self.assertTrue(hasattr(obj, attr), diff --git a/cqlengine/tests/test_consistency.py b/cqlengine/tests/test_consistency.py index 2f54ae243c..44dbd08ea4 100644 --- a/cqlengine/tests/test_consistency.py +++ b/cqlengine/tests/test_consistency.py @@ -29,7 +29,7 @@ class TestConsistency(BaseConsistencyTest): def test_create_uses_consistency(self): qs = TestConsistencyModel.consistency(ALL) - with mock.patch.object(ConnectionPool, 'execute') as m: + with mock.patch.object(self.session, 'execute') as m: qs.create(text="i am not fault tolerant this way") args = m.call_args @@ -43,7 +43,7 @@ def test_update_uses_consistency(self): t = TestConsistencyModel.create(text="bacon and eggs") t.text = "ham sandwich" - with mock.patch.object(ConnectionPool, 'execute') as m: + with mock.patch.object(self.session, 'execute') as m: t.consistency(ALL).save() args = m.call_args @@ -52,14 +52,14 @@ def test_update_uses_consistency(self): def test_batch_consistency(self): - with mock.patch.object(ConnectionPool, 'execute') as m: + with mock.patch.object(self.session, 'execute') as m: with BatchQuery(consistency=ALL) as b: 
TestConsistencyModel.batch(b).create(text="monkey") args = m.call_args self.assertEqual(ALL, args[0][2]) - with mock.patch.object(ConnectionPool, 'execute') as m: + with mock.patch.object(self.session, 'execute') as m: with BatchQuery() as b: TestConsistencyModel.batch(b).create(text="monkey") @@ -71,7 +71,7 @@ def test_blind_update(self): t.text = "ham sandwich" uid = t.id - with mock.patch.object(ConnectionPool, 'execute') as m: + with mock.patch.object(self.session, 'execute') as m: TestConsistencyModel.objects(id=uid).consistency(ALL).update(text="grilled cheese") args = m.call_args @@ -84,10 +84,10 @@ def test_delete(self): t.text = "ham and cheese sandwich" uid = t.id - with mock.patch.object(ConnectionPool, 'execute') as m: + with mock.patch.object(self.session, 'execute') as m: t.consistency(ALL).delete() - with mock.patch.object(ConnectionPool, 'execute') as m: + with mock.patch.object(self.session, 'execute') as m: TestConsistencyModel.objects(id=uid).consistency(ALL).delete() args = m.call_args diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index 261564319f..e59466e999 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -7,7 +7,6 @@ import mock import sure from cqlengine import Model, columns, BatchQuery -from cqlengine.connection import ConnectionPool from cqlengine.management import sync_table from cqlengine.tests.base import BaseCassEngTestCase @@ -28,7 +27,7 @@ def setUpClass(cls): class BatchTest(BaseTimestampTest): def test_batch_is_included(self): - with mock.patch.object(ConnectionPool, "execute") as m, BatchQuery(timestamp=timedelta(seconds=30)) as b: + with mock.patch.object(self.session, "execute") as m, BatchQuery(timestamp=timedelta(seconds=30)) as b: TestTimestampModel.batch(b).create(count=1) "USING TIMESTAMP".should.be.within(m.call_args[0][0]) @@ -37,7 +36,7 @@ def test_batch_is_included(self): class CreateWithTimestampTest(BaseTimestampTest): def test_batch(self): 
- with mock.patch.object(ConnectionPool, "execute") as m, BatchQuery() as b: + with mock.patch.object(self.session, "execute") as m, BatchQuery() as b: TestTimestampModel.timestamp(timedelta(seconds=10)).batch(b).create(count=1) query = m.call_args[0][0] @@ -46,7 +45,7 @@ def test_batch(self): query.should_not.match(r"TIMESTAMP.*INSERT") def test_timestamp_not_included_on_normal_create(self): - with mock.patch.object(ConnectionPool, "execute") as m: + with mock.patch.object(self.session, "execute") as m: TestTimestampModel.create(count=2) "USING TIMESTAMP".shouldnt.be.within(m.call_args[0][0]) @@ -62,7 +61,7 @@ def test_non_batch_syntax_integration(self): def test_non_batch_syntax_unit(self): - with mock.patch.object(ConnectionPool, "execute") as m: + with mock.patch.object(self.session, "execute") as m: TestTimestampModel.timestamp(timedelta(seconds=30)).create(count=1) query = m.call_args[0][0] @@ -73,17 +72,18 @@ def test_non_batch_syntax_unit(self): class UpdateWithTimestampTest(BaseTimestampTest): def setUp(self): self.instance = TestTimestampModel.create(count=1) + super(UpdateWithTimestampTest, self).setUp() def test_instance_update_includes_timestamp_in_query(self): # not a batch - with mock.patch.object(ConnectionPool, "execute") as m: + with mock.patch.object(self.session, "execute") as m: self.instance.timestamp(timedelta(seconds=30)).update(count=2) "USING TIMESTAMP".should.be.within(m.call_args[0][0]) def test_instance_update_in_batch(self): - with mock.patch.object(ConnectionPool, "execute") as m, BatchQuery() as b: + with mock.patch.object(self.session, "execute") as m, BatchQuery() as b: self.instance.batch(b).timestamp(timedelta(seconds=30)).update(count=2) query = m.call_args[0][0] From 2250762395736d7b620262b36eb9045b36fc0962 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 10:43:52 -0700 Subject: [PATCH 0743/3726] fixing batching constructors --- cqlengine/query.py | 4 ++-- upgrading.txt | 1 + 2 files changed, 3 insertions(+), 2 
deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index e0219db9a2..3651ab29fd 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -614,12 +614,12 @@ class SimpleQuerySet(AbstractQuerySet): """ - def _get_result_constructor(self, names): + def _get_result_constructor(self): """ Returns a function that will be used to instantiate query results """ def _construct_instance(values): - return ResultObject(zip(names, values)) + return ResultObject(values) return _construct_instance diff --git a/upgrading.txt b/upgrading.txt index cf82097bff..4f941aa1c1 100644 --- a/upgrading.txt +++ b/upgrading.txt @@ -3,3 +3,4 @@ Calls to execute should no longer use named placeholders. We no longer raise cqlengine based OperationalError when connection fails. Now using the exception thrown in the native driver. +If you're calling setup() with the old thrift port in place, you will get connection errors. Either remove it (the default native port is assumed) or change to the default native port, 9042. From 5280d526aaac9d9d16243404b474640968479308 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 10:45:30 -0700 Subject: [PATCH 0744/3726] removing connection pool --- cqlengine/tests/model/test_updates.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/tests/model/test_updates.py b/cqlengine/tests/model/test_updates.py index c9ab45d9de..df60d5d779 100644 --- a/cqlengine/tests/model/test_updates.py +++ b/cqlengine/tests/model/test_updates.py @@ -69,11 +69,11 @@ def test_noop_model_update(self): """ tests that calling update on a model with no changes will do nothing. 
""" m0 = TestUpdateModel.create(count=5, text='monkey') - with patch.object(ConnectionPool, 'execute') as execute: + with patch.object(self.session, 'execute') as execute: m0.update() assert execute.call_count == 0 - with patch.object(ConnectionPool, 'execute') as execute: + with patch.object(self.session, 'execute') as execute: m0.update(count=5) assert execute.call_count == 0 From e30e99c6dcd0c7fdd3397f700c41a1f6d3e10af5 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 10:47:08 -0700 Subject: [PATCH 0745/3726] removed connection pool and all references --- cqlengine/connection.py | 134 --------------------- cqlengine/tests/columns/test_validation.py | 6 +- cqlengine/tests/model/test_polymorphism.py | 2 +- cqlengine/tests/model/test_updates.py | 1 - cqlengine/tests/test_consistency.py | 1 - cqlengine/tests/test_ttl.py | 2 +- 6 files changed, 5 insertions(+), 141 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 90a54a975c..5bf93e5d47 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -87,140 +87,6 @@ def setup( session.row_factory = dict_factory - -class ConnectionPool(object): - """Handles pooling of database connections.""" - - def __init__( - self, - hosts, - username=None, - password=None, - consistency=None, - timeout=None): - self._hosts = hosts - self._username = username - self._password = password - self._consistency = consistency - self._timeout = timeout - - self._queue = queue.Queue(maxsize=_max_connections) - - def clear(self): - """ - Force the connection pool to be cleared. Will close all internal - connections. - """ - try: - while not self._queue.empty(): - self._queue.get().close() - except: - pass - - def get(self): - """ - Returns a usable database connection. Uses the internal queue to - determine whether to return an existing connection or to create - a new one. 
- """ - try: - # get with block=False returns an item if one - # is immediately available, else raises the Empty exception - return self._queue.get(block=False) - except queue.Empty: - return self._create_connection() - - def put(self, conn): - """ - Returns a connection to the queue freeing it up for other queries to - use. - - :param conn: The connection to be released - :type conn: connection - """ - - try: - self._queue.put(conn, block=False) - except queue.Full: - conn.close() - - def _create_transport(self, host): - """ - Create a new Thrift transport for the given host. - - :param host: The host object - :type host: Host - - :rtype: thrift.TTransport.* - - """ - from thrift.transport import TSocket, TTransport - - thrift_socket = TSocket.TSocket(host.name, host.port) - - if self._timeout is not None: - thrift_socket.setTimeout(self._timeout) - - return TTransport.TFramedTransport(thrift_socket) - - def _create_connection(self): - """ - Creates a new connection for the connection pool. 
- - should only return a valid connection that it's actually connected to - """ - if not self._hosts: - raise CQLConnectionError("At least one host required") - - hosts = copy(self._hosts) - random.shuffle(hosts) - - for host in hosts: - try: - transport = self._create_transport(host) - new_conn = cql.connect( - host.name, - host.port, - user=self._username, - password=self._password, - consistency_level=self._consistency, - transport=transport - ) - new_conn.set_cql_version('3.0.0') - return new_conn - except Exception as exc: - logging.debug("Could not establish connection to" - " {}:{} ({!r})".format(host.name, host.port, exc)) - - raise CQLConnectionError("Could not connect to any server in cluster") - - def execute(self, query, params, consistency_level=None): - if not consistency_level: - consistency_level = self._consistency - - while True: - try: - con = self.get() - if not con: - raise CQLEngineException("Error calling execute without calling setup.") - LOG.debug('{} {}'.format(query, repr(params))) - cur = con.cursor() - cur.execute(query, params, consistency_level=consistency_level) - columns = [i[0] for i in cur.description or []] - results = [RowResult(r) for r in cur.fetchall()] - self.put(con) - return QueryResult(columns, results) - except CQLConnectionError as ex: - raise CQLEngineException("Could not execute query against the cluster") - except cql.ProgrammingError as ex: - raise CQLEngineException(unicode(ex)) - except TTransportException: - pass - except OperationalError as ex: - LOG.exception("Operational Error %s on %s:%s", ex, con.host, con.port) - raise ex - - def execute(query, params=None, consistency_level=None): raise Exception("shut up") diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 4232b4003c..57f7aaf20c 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -6,7 +6,7 @@ from unittest import TestCase from uuid import uuid4, 
uuid1 from cqlengine import ValidationError -from cqlengine.connection import execute +from cqlengine.connection import execute, execute_native from cqlengine.tests.base import BaseCassEngTestCase @@ -175,7 +175,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): super(TestUUID, cls).tearDownClass() - delete_table(cls.UUIDTest) + delete_table(cls.UUIDTest) def test_uuid_str_with_dashes(self): a_uuid = uuid4() @@ -298,7 +298,7 @@ def test_extra_field(self): drop_table(self.TestModel) sync_table(self.TestModel) self.TestModel.create() - execute("ALTER TABLE {} add blah int".format(self.TestModel.column_family_name(include_keyspace=True))) + execute_native("ALTER TABLE {} add blah int".format(self.TestModel.column_family_name(include_keyspace=True))) self.TestModel.objects().all() class TestTimeUUIDFromDatetime(TestCase): diff --git a/cqlengine/tests/model/test_polymorphism.py b/cqlengine/tests/model/test_polymorphism.py index 197fcf712a..7db8bb4efd 100644 --- a/cqlengine/tests/model/test_polymorphism.py +++ b/cqlengine/tests/model/test_polymorphism.py @@ -3,7 +3,7 @@ from cqlengine import columns from cqlengine import models -from cqlengine.connection import ConnectionPool, get_session +from cqlengine.connection import get_session from cqlengine.tests.base import BaseCassEngTestCase from cqlengine import management diff --git a/cqlengine/tests/model/test_updates.py b/cqlengine/tests/model/test_updates.py index df60d5d779..6141436c8f 100644 --- a/cqlengine/tests/model/test_updates.py +++ b/cqlengine/tests/model/test_updates.py @@ -7,7 +7,6 @@ from cqlengine.models import Model from cqlengine import columns from cqlengine.management import sync_table, drop_table -from cqlengine.connection import ConnectionPool class TestUpdateModel(Model): diff --git a/cqlengine/tests/test_consistency.py b/cqlengine/tests/test_consistency.py index 44dbd08ea4..9821080b12 100644 --- a/cqlengine/tests/test_consistency.py +++ b/cqlengine/tests/test_consistency.py @@ -4,7 +4,6 @@ 
from uuid import uuid4 from cqlengine import columns import mock -from cqlengine.connection import ConnectionPool from cqlengine import ALL, BatchQuery class TestConsistencyModel(Model): diff --git a/cqlengine/tests/test_ttl.py b/cqlengine/tests/test_ttl.py index b55254a5eb..3d906b78e5 100644 --- a/cqlengine/tests/test_ttl.py +++ b/cqlengine/tests/test_ttl.py @@ -4,7 +4,7 @@ from uuid import uuid4 from cqlengine import columns import mock -from cqlengine.connection import ConnectionPool, get_session +from cqlengine.connection import get_session class TestTTLModel(Model): From 281fe43ea4609b2ce7b6c0a2f8e8013d3d13b940 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 10:48:27 -0700 Subject: [PATCH 0746/3726] removed connection pool and all references --- upgrading.txt | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/upgrading.txt b/upgrading.txt index 4f941aa1c1..6a13ce7acc 100644 --- a/upgrading.txt +++ b/upgrading.txt @@ -1,6 +1,10 @@ 0.15 Upgrade to use Datastax Native Driver -Calls to execute should no longer use named placeholders. We no longer raise cqlengine based OperationalError when connection fails. Now using the exception thrown in the native driver. If you're calling setup() with the old thrift port in place, you will get connection errors. Either remove it (the default native port is assumed) or change to the default native port, 9042. + +The cqlengine connection pool has been removed. Connections are now managed by the native driver. This should drastically reduce the socket overhead as the native driver can multiplex queries. 
+ + + From 6cfa0df73e9d42013e1fdd2ea7adfd1489bac0f9 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 12:33:55 -0700 Subject: [PATCH 0747/3726] removed old context manager --- cqlengine/connection.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 5bf93e5d47..1891da2c8c 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -94,6 +94,7 @@ def execute(query, params=None, consistency_level=None): if isinstance(query, BaseCQLStatement): params = query.get_context() query = str(query) + params = params or {} if consistency_level is None: consistency_level = connection_pool._consistency @@ -101,15 +102,6 @@ def execute(query, params=None, consistency_level=None): #return execute_native(query, params, consistency_level) -@contextmanager -def connection_manager(): - """ :rtype: ConnectionPool """ - raise Exception("deprecated") - # global connection_pool - # # tmp = connection_pool.get() - # yield connection_pool - # # connection_pool.put(tmp) - def execute_native(query, params=None, consistency_level=None): # TODO use consistency level From faffdfe912f7b511db47fee319b445de964b8b75 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 12:46:26 -0700 Subject: [PATCH 0748/3726] removed unused imports --- cqlengine/connection.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 1891da2c8c..3e14faaa8a 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -10,19 +10,11 @@ except ImportError: # python 3 import queue -import random -import cql import logging -from copy import copy from cqlengine.exceptions import CQLEngineException -from cql import OperationalError - -from contextlib import contextmanager - -from thrift.transport.TTransport import TTransportException from cqlengine.statements import BaseCQLStatement from cassandra.query import dict_factory @@ 
-94,7 +86,7 @@ def execute(query, params=None, consistency_level=None): if isinstance(query, BaseCQLStatement): params = query.get_context() query = str(query) - + params = params or {} if consistency_level is None: consistency_level = connection_pool._consistency From 1d58e740bd06e6ee68ca325b1ee16ad6a322c3ad Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 13:37:57 -0700 Subject: [PATCH 0749/3726] fixing consistency level --- cqlengine/__init__.py | 19 +++++++++-------- cqlengine/connection.py | 33 ++++++----------------------- cqlengine/query.py | 2 +- cqlengine/tests/test_consistency.py | 12 +++++------ upgrading.txt | 3 +-- 5 files changed, 25 insertions(+), 44 deletions(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index cd9f8f3557..c6e3965350 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -1,5 +1,7 @@ import pkg_resources +from cassandra import ConsistencyLevel + from cqlengine.columns import * from cqlengine.functions import * from cqlengine.models import Model @@ -15,12 +17,11 @@ LeveledCompactionStrategy = "LeveledCompactionStrategy" -ANY = "ANY" -ONE = "ONE" -TWO = "TWO" -THREE = "THREE" -QUORUM = "QUORUM" -LOCAL_QUORUM = "LOCAL_QUORUM" -EACH_QUORUM = "EACH_QUORUM" -ALL = "ALL" - +ANY = ConsistencyLevel.ANY +ONE = ConsistencyLevel.ONE +TWO = ConsistencyLevel.TWO +THREE = ConsistencyLevel.THREE +QUORUM = ConsistencyLevel.QUORUM +LOCAL_QUORUM = ConsistencyLevel.LOCAL_QUORUM +EACH_QUORUM = ConsistencyLevel.EACH_QUORUM +ALL = ConsistencyLevel.ALL diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 3e14faaa8a..66aa55870d 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -4,6 +4,7 @@ from collections import namedtuple from cassandra.cluster import Cluster +from cassandra.query import SimpleStatement, Statement try: import Queue as queue @@ -24,10 +25,6 @@ class CQLConnectionError(CQLEngineException): pass Host = namedtuple('Host', ['name', 'port']) -_max_connections = 10 - 
-# global connection pool -connection_pool = None cluster = None session = None @@ -49,7 +46,6 @@ def setup( hosts, username=None, password=None, - max_connections=10, default_keyspace=None, consistency='ONE', timeout=None): @@ -62,8 +58,6 @@ def setup( :type username: str :param password: The cassandra password :type password: str - :param max_connections: The maximum number of connections to service - :type max_connections: int or long :param default_keyspace: The default keyspace to use :type default_keyspace: str :param consistency: The global consistency level @@ -72,34 +66,21 @@ def setup( :type timeout: int or long """ - global _max_connections, connection_pool, cluster, session + global cluster, session cluster = Cluster(hosts) session = cluster.connect() session.row_factory = dict_factory - -def execute(query, params=None, consistency_level=None): - - raise Exception("shut up") - - if isinstance(query, BaseCQLStatement): - params = query.get_context() - query = str(query) - - params = params or {} - if consistency_level is None: - consistency_level = connection_pool._consistency - return connection_pool.execute(query, params, consistency_level) - - #return execute_native(query, params, consistency_level) - - def execute_native(query, params=None, consistency_level=None): # TODO use consistency level - if isinstance(query, BaseCQLStatement): + if isinstance(query, Statement): + pass + + elif isinstance(query, BaseCQLStatement): params = query.get_context() query = str(query) + query = SimpleStatement(query, consistency_level=consistency_level) params = params or {} result = session.execute(query, params) diff --git a/cqlengine/query.py b/cqlengine/query.py index 3651ab29fd..a3f43e0f6b 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -4,7 +4,7 @@ from cqlengine import BaseContainerColumn, Map, columns from cqlengine.columns import Counter, List, Set -from cqlengine.connection import execute, RowResult, execute_native +from cqlengine.connection 
import RowResult, execute_native from cqlengine.exceptions import CQLEngineException, ValidationError diff --git a/cqlengine/tests/test_consistency.py b/cqlengine/tests/test_consistency.py index 9821080b12..465001082b 100644 --- a/cqlengine/tests/test_consistency.py +++ b/cqlengine/tests/test_consistency.py @@ -32,7 +32,7 @@ def test_create_uses_consistency(self): qs.create(text="i am not fault tolerant this way") args = m.call_args - self.assertEqual(ALL, args[0][2]) + self.assertEqual(ALL, args[0][0].consistency_level) def test_queryset_is_returned_on_create(self): qs = TestConsistencyModel.consistency(ALL) @@ -46,7 +46,7 @@ def test_update_uses_consistency(self): t.consistency(ALL).save() args = m.call_args - self.assertEqual(ALL, args[0][2]) + self.assertEqual(ALL, args[0][0].consistency_level) def test_batch_consistency(self): @@ -56,14 +56,14 @@ def test_batch_consistency(self): TestConsistencyModel.batch(b).create(text="monkey") args = m.call_args - self.assertEqual(ALL, args[0][2]) + self.assertEqual(ALL, args[0][0].consistency_level) with mock.patch.object(self.session, 'execute') as m: with BatchQuery() as b: TestConsistencyModel.batch(b).create(text="monkey") args = m.call_args - self.assertNotEqual(ALL, args[0][2]) + self.assertNotEqual(ALL, args[0][0].consistency_level) def test_blind_update(self): t = TestConsistencyModel.create(text="bacon and eggs") @@ -74,7 +74,7 @@ def test_blind_update(self): TestConsistencyModel.objects(id=uid).consistency(ALL).update(text="grilled cheese") args = m.call_args - self.assertEqual(ALL, args[0][2]) + self.assertEqual(ALL, args[0][0].consistency_level) def test_delete(self): @@ -90,4 +90,4 @@ def test_delete(self): TestConsistencyModel.objects(id=uid).consistency(ALL).delete() args = m.call_args - self.assertEqual(ALL, args[0][2]) + self.assertEqual(ALL, args[0][0].consistency_level) diff --git a/upgrading.txt b/upgrading.txt index 6a13ce7acc..59e3a7c189 100644 --- a/upgrading.txt +++ b/upgrading.txt @@ -6,5 +6,4 @@ 
If you're calling setup() with the old thrift port in place, you will get connec The cqlengine connection pool has been removed. Connections are now managed by the native driver. This should drastically reduce the socket overhead as the native driver can multiplex queries. - - +If you were previously manually using "ALL", "QUORUM", etc, to specificy consistency levels, you will need to migrate to the cqlengine.ALL, QUORUM, etc instead. If you had been using the module level constants before, nothing should need to change. From 3b6a3a739d8d8028795e8977f951dc4877637a62 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 14:13:17 -0700 Subject: [PATCH 0750/3726] fixed multiple broken tests due to simple statement wrapper --- cqlengine/functions.py | 4 ++-- cqlengine/tests/columns/test_validation.py | 2 +- cqlengine/tests/model/test_polymorphism.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/functions.py b/cqlengine/functions.py index d70b129710..838273cb6b 100644 --- a/cqlengine/functions.py +++ b/cqlengine/functions.py @@ -69,7 +69,7 @@ class MaxTimeUUID(BaseQueryFunction): http://cassandra.apache.org/doc/cql3/CQL.html#timeuuidFun """ - format_string = 'MaxTimeUUID(:{})' + format_string = 'MaxTimeUUID(%({})s)' def __init__(self, value): """ @@ -109,7 +109,7 @@ def get_context_size(self): return len(self.value) def __unicode__(self): - token_args = ', '.join(':{}'.format(self.context_id + i) for i in range(self.get_context_size())) + token_args = ', '.join('%({})s'.format(self.context_id + i) for i in range(self.get_context_size())) return "token({})".format(token_args) def update_context(self, ctx): diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 57f7aaf20c..ad256062b2 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -6,7 +6,7 @@ from unittest import TestCase from uuid import uuid4, uuid1 from cqlengine 
import ValidationError -from cqlengine.connection import execute, execute_native +from cqlengine.connection import execute_native from cqlengine.tests.base import BaseCassEngTestCase diff --git a/cqlengine/tests/model/test_polymorphism.py b/cqlengine/tests/model/test_polymorphism.py index 7db8bb4efd..28c3dccb82 100644 --- a/cqlengine/tests/model/test_polymorphism.py +++ b/cqlengine/tests/model/test_polymorphism.py @@ -130,7 +130,7 @@ def test_delete_on_polymorphic_subclass_does_not_include_polymorphic_key(self): # not sure how we would even get here if it was in there # since the CQL would fail. - self.assertNotIn("row_type", m.call_args[0][0]) + self.assertNotIn("row_type", m.call_args[0][0].query_string) From ca2d0e27f850f823f8f68a5476586e1627047e74 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 14:23:36 -0700 Subject: [PATCH 0751/3726] fixed brokenness around timeuuids --- cqlengine/functions.py | 2 +- cqlengine/tests/query/test_queryoperators.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/functions.py b/cqlengine/functions.py index 838273cb6b..b5f6a5af71 100644 --- a/cqlengine/functions.py +++ b/cqlengine/functions.py @@ -42,7 +42,7 @@ class MinTimeUUID(BaseQueryFunction): http://cassandra.apache.org/doc/cql3/CQL.html#timeuuidFun """ - format_string = 'MinTimeUUID(:{})' + format_string = 'MinTimeUUID(%({})s)' def __init__(self, value): """ diff --git a/cqlengine/tests/query/test_queryoperators.py b/cqlengine/tests/query/test_queryoperators.py index dd70ef8300..1cedd6f367 100644 --- a/cqlengine/tests/query/test_queryoperators.py +++ b/cqlengine/tests/query/test_queryoperators.py @@ -32,7 +32,7 @@ def test_mintimeuuid_function(self): where = WhereClause('time', EqualsOperator(), functions.MinTimeUUID(now)) where.set_context_id(5) - self.assertEqual(str(where), '"time" = MinTimeUUID(:5)') + self.assertEqual(str(where), '"time" = MinTimeUUID(%(5)s)') ctx = {} where.update_context(ctx) self.assertEqual(ctx, 
{'5': DateTime().to_database(now)}) @@ -82,7 +82,7 @@ class TestModel(Model): q = TestModel.objects.filter(pk__token__gt=func) where = q._where[0] where.set_context_id(1) - self.assertEquals(str(where), 'token("p1", "p2") > token(:{}, :{})'.format(1, 2)) + self.assertEquals(str(where), 'token("p1", "p2") > token(%({})s, %({})s)'.format(1, 2)) # Verify that a SELECT query can be successfully generated str(q._select_query()) @@ -94,7 +94,7 @@ class TestModel(Model): q = TestModel.objects.filter(pk__token__gt=func) where = q._where[0] where.set_context_id(1) - self.assertEquals(str(where), 'token("p1", "p2") > token(:{}, :{})'.format(1, 2)) + self.assertEquals(str(where), 'token("p1", "p2") > token(%({})s, %({})s)'.format(1, 2)) str(q._select_query()) # The 'pk__token' virtual column may only be compared to a Token From 40061ce45cd9f42584d828496ef0b2dd8b947b5b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 14:24:58 -0700 Subject: [PATCH 0752/3726] fixed timeuuid. again --- cqlengine/tests/query/test_queryoperators.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/tests/query/test_queryoperators.py b/cqlengine/tests/query/test_queryoperators.py index 1cedd6f367..01cce27fc8 100644 --- a/cqlengine/tests/query/test_queryoperators.py +++ b/cqlengine/tests/query/test_queryoperators.py @@ -19,7 +19,7 @@ def test_maxtimeuuid_function(self): where = WhereClause('time', EqualsOperator(), functions.MaxTimeUUID(now)) where.set_context_id(5) - self.assertEqual(str(where), '"time" = MaxTimeUUID(:5)') + self.assertEqual(str(where), '"time" = MaxTimeUUID(%(5)s)') ctx = {} where.update_context(ctx) self.assertEqual(ctx, {'5': DateTime().to_database(now)}) From 2416d91a28b8796f6ccf36131cea1141daf51686 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 14:43:31 -0700 Subject: [PATCH 0753/3726] fixed batch consistency --- cqlengine/connection.py | 5 ++++- cqlengine/tests/test_consistency.py | 1 + 
cqlengine/tests/test_timestamp.py | 6 +++--- cqlengine/tests/test_ttl.py | 8 ++++---- 4 files changed, 12 insertions(+), 8 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 66aa55870d..b2da6bf7d6 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -73,7 +73,6 @@ def setup( session.row_factory = dict_factory def execute_native(query, params=None, consistency_level=None): - # TODO use consistency level if isinstance(query, Statement): pass @@ -82,6 +81,10 @@ def execute_native(query, params=None, consistency_level=None): query = str(query) query = SimpleStatement(query, consistency_level=consistency_level) + elif isinstance(query, basestring): + query = SimpleStatement(query, consistency_level=consistency_level) + + params = params or {} result = session.execute(query, params) diff --git a/cqlengine/tests/test_consistency.py b/cqlengine/tests/test_consistency.py index 465001082b..9f0f3f86b3 100644 --- a/cqlengine/tests/test_consistency.py +++ b/cqlengine/tests/test_consistency.py @@ -56,6 +56,7 @@ def test_batch_consistency(self): TestConsistencyModel.batch(b).create(text="monkey") args = m.call_args + self.assertEqual(ALL, args[0][0].consistency_level) with mock.patch.object(self.session, 'execute') as m: diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index e59466e999..6af7bf543e 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -48,7 +48,7 @@ def test_timestamp_not_included_on_normal_create(self): with mock.patch.object(self.session, "execute") as m: TestTimestampModel.create(count=2) - "USING TIMESTAMP".shouldnt.be.within(m.call_args[0][0]) + "USING TIMESTAMP".shouldnt.be.within(m.call_args[0][0].query_string) def test_timestamp_is_set_on_model_queryset(self): delta = timedelta(seconds=30) @@ -64,7 +64,7 @@ def test_non_batch_syntax_unit(self): with mock.patch.object(self.session, "execute") as m: 
TestTimestampModel.timestamp(timedelta(seconds=30)).create(count=1) - query = m.call_args[0][0] + query = m.call_args[0][0].query_string "USING TIMESTAMP".should.be.within(query) @@ -80,7 +80,7 @@ def test_instance_update_includes_timestamp_in_query(self): with mock.patch.object(self.session, "execute") as m: self.instance.timestamp(timedelta(seconds=30)).update(count=2) - "USING TIMESTAMP".should.be.within(m.call_args[0][0]) + "USING TIMESTAMP".should.be.within(m.call_args[0][0].query_string) def test_instance_update_in_batch(self): with mock.patch.object(self.session, "execute") as m, BatchQuery() as b: diff --git a/cqlengine/tests/test_ttl.py b/cqlengine/tests/test_ttl.py index 3d906b78e5..45f865e0e6 100644 --- a/cqlengine/tests/test_ttl.py +++ b/cqlengine/tests/test_ttl.py @@ -45,7 +45,7 @@ def test_ttl_included_on_create(self): with mock.patch.object(session, 'execute') as m: TestTTLModel.ttl(60).create(text="hello blake") - query = m.call_args[0][0] + query = m.call_args[0][0].query_string self.assertIn("USING TTL", query) def test_queryset_is_returned_on_class(self): @@ -65,7 +65,7 @@ def test_update_includes_ttl(self): with mock.patch.object(session, 'execute') as m: model.ttl(60).update(text="goodbye forever") - query = m.call_args[0][0] + query = m.call_args[0][0].query_string self.assertIn("USING TTL", query) def test_update_syntax_valid(self): @@ -98,7 +98,7 @@ def test_ttl_is_include_with_query_on_update(self): with mock.patch.object(session, 'execute') as m: o.save() - query = m.call_args[0][0] + query = m.call_args[0][0].query_string self.assertIn("USING TTL", query) @@ -112,7 +112,7 @@ def test_ttl_included_with_blind_update(self): with mock.patch.object(session, 'execute') as m: TestTTLModel.objects(id=tid).ttl(60).update(text="bacon") - query = m.call_args[0][0] + query = m.call_args[0][0].query_string self.assertIn("USING TTL", query) From bcec8a4d22b49d115d9941c8fc4d1c3426c2076c Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 
14:45:14 -0700 Subject: [PATCH 0754/3726] fixed install requirements --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index dfbcdf05f1..a427f36bee 100644 --- a/setup.py +++ b/setup.py @@ -31,7 +31,7 @@ "Topic :: Software Development :: Libraries :: Python Modules", ], keywords='cassandra,cql,orm', - install_requires = ['cql', 'cassandra-driver'], + install_requires = ['cassandra-driver >= 2.0.0'], author='Blake Eggleston, Jon Haddad', author_email='bdeggleston@gmail.com', url='https://github.com/cqlengine/cqlengine', From 24d8989069aea101d46c7c2b0a63324b4422c27d Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 19 Jun 2014 14:57:18 -0700 Subject: [PATCH 0755/3726] fixed the last of the broken tests --- cqlengine/tests/test_timestamp.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index 6af7bf543e..b1a6a2860d 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -30,7 +30,7 @@ def test_batch_is_included(self): with mock.patch.object(self.session, "execute") as m, BatchQuery(timestamp=timedelta(seconds=30)) as b: TestTimestampModel.batch(b).create(count=1) - "USING TIMESTAMP".should.be.within(m.call_args[0][0]) + "USING TIMESTAMP".should.be.within(m.call_args[0][0].query_string) class CreateWithTimestampTest(BaseTimestampTest): @@ -39,7 +39,7 @@ def test_batch(self): with mock.patch.object(self.session, "execute") as m, BatchQuery() as b: TestTimestampModel.timestamp(timedelta(seconds=10)).batch(b).create(count=1) - query = m.call_args[0][0] + query = m.call_args[0][0].query_string query.should.match(r"INSERT.*USING TIMESTAMP") query.should_not.match(r"TIMESTAMP.*INSERT") @@ -86,7 +86,7 @@ def test_instance_update_in_batch(self): with mock.patch.object(self.session, "execute") as m, BatchQuery() as b: self.instance.batch(b).timestamp(timedelta(seconds=30)).update(count=2) - query = 
m.call_args[0][0] + query = m.call_args[0][0].query_string "USING TIMESTAMP".should.be.within(query) From de57f366a86b0334377d7ad135bab54e828aeda9 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 20 Jun 2014 11:00:32 -0700 Subject: [PATCH 0756/3726] removed bool quoter --- cqlengine/columns.py | 10 ---------- cqlengine/tests/columns/test_value_io.py | 20 -------------------- 2 files changed, 30 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 83caac6b85..c33304bace 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -451,23 +451,13 @@ def from_datetime(self, dt): class Boolean(Column): db_type = 'boolean' - class Quoter(ValueQuoter): - """ Cassandra 1.2.5 is stricter about boolean values """ - def __str__(self): - return 'true' if self.value else 'false' - def validate(self, value): """ Always returns a Python boolean. """ - if isinstance(value, self.Quoter): - value = value.value return bool(value) def to_python(self, value): return self.validate(value) - def to_database(self, value): - return self.Quoter(self.validate(value)) - class Float(Column): db_type = 'double' diff --git a/cqlengine/tests/columns/test_value_io.py b/cqlengine/tests/columns/test_value_io.py index 4fa36d6cc1..121eb2580c 100644 --- a/cqlengine/tests/columns/test_value_io.py +++ b/cqlengine/tests/columns/test_value_io.py @@ -144,26 +144,6 @@ class TestTimeUUID(BaseColumnIOTest): def comparator_converter(self, val): return val if isinstance(val, UUID) else UUID(val) -class TestBooleanIO(BaseColumnIOTest): - - column = columns.Boolean - - pkey_val = True - data_val = False - - def comparator_converter(self, val): - return val.value if isinstance(val, columns.Boolean.Quoter) else val - -class TestBooleanQuoter(BaseColumnIOTest): - - column = columns.Boolean - - pkey_val = True - data_val = columns.Boolean.Quoter(False) - - def comparator_converter(self, val): - return val.value if isinstance(val, columns.Boolean.Quoter) else val - class 
TestFloatIO(BaseColumnIOTest): column = columns.Float From 6c4c6f1a3f7e0846667e860b3ec30d5c09fb2c59 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 23 Jun 2014 12:06:01 -0700 Subject: [PATCH 0757/3726] kwargs on setup --- cqlengine/connection.py | 12 +++++++----- cqlengine/management.py | 18 +++++++++--------- cqlengine/query.py | 8 ++++---- cqlengine/tests/columns/test_validation.py | 4 ++-- 4 files changed, 22 insertions(+), 20 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index b2da6bf7d6..592cdee9c3 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -44,11 +44,10 @@ def _column_tuple_factory(colnames, values): def setup( hosts, - username=None, - password=None, default_keyspace=None, consistency='ONE', - timeout=None): + timeout=None, + **kwargs): """ Records the hosts and connects to one of them @@ -68,11 +67,14 @@ def setup( """ global cluster, session - cluster = Cluster(hosts) + if 'username' in kwargs or 'password' in kwargs: + raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider") + + cluster = Cluster(hosts, **kwargs) session = cluster.connect() session.row_factory = dict_factory -def execute_native(query, params=None, consistency_level=None): +def execute(query, params=None, consistency_level=None): if isinstance(query, Statement): pass diff --git a/cqlengine/management.py b/cqlengine/management.py index a0716002da..2eeaa310ef 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -4,7 +4,7 @@ from cqlengine import ONE from cqlengine.named import NamedTable -from cqlengine.connection import execute_native, get_cluster +from cqlengine.connection import execute, get_cluster from cqlengine.exceptions import CQLEngineException import logging @@ -50,13 +50,13 @@ def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, if strategy_class != 'SimpleStrategy': query += " AND DURABLE_WRITES = {}".format('true' if 
durable_writes else 'false') - execute_native(query) + execute(query) def delete_keyspace(name): cluster = get_cluster() if name in cluster.metadata.keyspaces: - execute_native("DROP KEYSPACE {}".format(name)) + execute("DROP KEYSPACE {}".format(name)) def create_table(model, create_missing_keyspace=True): warnings.warn("create_table has been deprecated in favor of sync_table and will be removed in a future release", DeprecationWarning) @@ -96,7 +96,7 @@ def sync_table(model, create_missing_keyspace=True): qs = get_create_table(model) try: - execute_native(qs) + execute(qs) except CQLEngineException as ex: # 1.2 doesn't return cf names, so we have to examine the exception # and ignore if it says the column family already exists @@ -113,7 +113,7 @@ def sync_table(model, create_missing_keyspace=True): # add missing column using the column def query = "ALTER TABLE {} add {}".format(cf_name, col.get_column_def()) logger.debug(query) - execute_native(query) + execute(query) update_compaction(model) @@ -130,7 +130,7 @@ def sync_table(model, create_missing_keyspace=True): qs += ['ON {}'.format(cf_name)] qs += ['("{}")'.format(column.db_field_name)] qs = ' '.join(qs) - execute_native(qs) + execute(qs) def get_create_table(model): cf_name = model.column_family_name() @@ -224,7 +224,7 @@ def get_fields(model): col_family = model.column_family_name(include_keyspace=False) query = "select * from system.schema_columns where keyspace_name = %s and columnfamily_name = %s" - tmp = execute_native(query, [ks_name, col_family]) + tmp = execute(query, [ks_name, col_family]) # Tables containing only primary keys do not appear to create # any entries in system.schema_columns, as only non-primary-key attributes @@ -287,7 +287,7 @@ def update_compaction(model): cf_name = model.column_family_name() query = "ALTER TABLE {} with compaction = {}".format(cf_name, options) logger.debug(query) - execute_native(query) + execute(query) return True return False @@ -308,7 +308,7 @@ def 
drop_table(model): try: table = meta.keyspaces[ks_name].tables[raw_cf_name] - execute_native('drop table {};'.format(model.column_family_name(include_keyspace=True))) + execute('drop table {};'.format(model.column_family_name(include_keyspace=True))) except KeyError: pass diff --git a/cqlengine/query.py b/cqlengine/query.py index a3f43e0f6b..1536456851 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -4,7 +4,7 @@ from cqlengine import BaseContainerColumn, Map, columns from cqlengine.columns import Counter, List, Set -from cqlengine.connection import RowResult, execute_native +from cqlengine.connection import RowResult, execute from cqlengine.exceptions import CQLEngineException, ValidationError @@ -174,7 +174,7 @@ def execute(self): query_list.append('APPLY BATCH;') - execute_native('\n'.join(query_list), parameters, self._consistency) + execute('\n'.join(query_list), parameters, self._consistency) self.queries = [] self._execute_callbacks() @@ -232,7 +232,7 @@ def _execute(self, q): if self._batch: return self._batch.add_query(q) else: - result = execute_native(q, consistency_level=self._consistency) + result = execute(q, consistency_level=self._consistency) return result def __unicode__(self): @@ -781,7 +781,7 @@ def _execute(self, q): if self._batch: return self._batch.add_query(q) else: - tmp = execute_native(q, consistency_level=self._consistency) + tmp = execute(q, consistency_level=self._consistency) return tmp def batch(self, batch_obj): diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index ad256062b2..ceaa9371aa 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -6,7 +6,7 @@ from unittest import TestCase from uuid import uuid4, uuid1 from cqlengine import ValidationError -from cqlengine.connection import execute_native +from cqlengine.connection import execute from cqlengine.tests.base import BaseCassEngTestCase @@ -298,7 +298,7 @@ def 
test_extra_field(self): drop_table(self.TestModel) sync_table(self.TestModel) self.TestModel.create() - execute_native("ALTER TABLE {} add blah int".format(self.TestModel.column_family_name(include_keyspace=True))) + execute("ALTER TABLE {} add blah int".format(self.TestModel.column_family_name(include_keyspace=True))) self.TestModel.objects().all() class TestTimeUUIDFromDatetime(TestCase): From 41072658b9c7bee68f24d63eae62ea8e17cb2a4b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 23 Jun 2014 12:11:02 -0700 Subject: [PATCH 0758/3726] removed dead code --- cqlengine/connection.py | 10 ---------- cqlengine/query.py | 3 +-- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 592cdee9c3..5ed17131ef 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -32,16 +32,6 @@ class CQLConnectionError(CQLEngineException): pass class CQLConnectionError(CQLEngineException): pass -class RowResult(tuple): - pass - -QueryResult = namedtuple('RowResult', ('columns', 'results')) - - -def _column_tuple_factory(colnames, values): - return tuple(colnames), [RowResult(v) for v in values] - - def setup( hosts, default_keyspace=None, diff --git a/cqlengine/query.py b/cqlengine/query.py index 1536456851..320084ef00 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -4,8 +4,7 @@ from cqlengine import BaseContainerColumn, Map, columns from cqlengine.columns import Counter, List, Set -from cqlengine.connection import RowResult, execute - +from cqlengine.connection import execute from cqlengine.exceptions import CQLEngineException, ValidationError from cqlengine.functions import Token, BaseQueryFunction, QueryValue From 7779a032066360f86b5b2e505618f206298c0f5f Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 23 Jun 2014 12:27:46 -0700 Subject: [PATCH 0759/3726] fixes for consistency and setup -> native driver passthrough --- cqlengine/connection.py | 17 +++++++++-------- upgrading.txt | 4 ++++ 2 
files changed, 13 insertions(+), 8 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 5ed17131ef..b72b6a5ce7 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -15,7 +15,7 @@ import logging from cqlengine.exceptions import CQLEngineException - +from cassandra import ConsistencyLevel from cqlengine.statements import BaseCQLStatement from cassandra.query import dict_factory @@ -27,16 +27,12 @@ class CQLConnectionError(CQLEngineException): pass cluster = None session = None - - -class CQLConnectionError(CQLEngineException): pass - +default_consistency_level = None def setup( hosts, default_keyspace=None, - consistency='ONE', - timeout=None, + consistency=ConsistencyLevel.ONE, **kwargs): """ Records the hosts and connects to one of them @@ -55,16 +51,21 @@ def setup( :type timeout: int or long """ - global cluster, session + global cluster, session, default_consistency_level if 'username' in kwargs or 'password' in kwargs: raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider") + default_consistency_level = consistency cluster = Cluster(hosts, **kwargs) session = cluster.connect() session.row_factory = dict_factory def execute(query, params=None, consistency_level=None): + + if consistency_level is None: + consistency_level = default_consistency_level + if isinstance(query, Statement): pass diff --git a/upgrading.txt b/upgrading.txt index 59e3a7c189..95727aa546 100644 --- a/upgrading.txt +++ b/upgrading.txt @@ -7,3 +7,7 @@ If you're calling setup() with the old thrift port in place, you will get connec The cqlengine connection pool has been removed. Connections are now managed by the native driver. This should drastically reduce the socket overhead as the native driver can multiplex queries. If you were previously manually using "ALL", "QUORUM", etc, to specificy consistency levels, you will need to migrate to the cqlengine.ALL, QUORUM, etc instead. 
If you had been using the module level constants before, nothing should need to change. + +No longer accepting username & password as arguments to setup. Use the native driver's authentication instead. See http://datastax.github.io/python-driver/api/cassandra/auth.html + + From cae504681f2794f31ffa1de8f108203a0cd7b72a Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 23 Jun 2014 14:38:20 -0700 Subject: [PATCH 0760/3726] removed old fields --- cqlengine/connection.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index b72b6a5ce7..af1afd7b5a 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -39,14 +39,10 @@ def setup( :param hosts: list of hosts, strings in the :, or just :type hosts: list - :param username: The cassandra username - :type username: str - :param password: The cassandra password - :type password: str :param default_keyspace: The default keyspace to use :type default_keyspace: str :param consistency: The global consistency level - :type consistency: str + :type consistency: int :param timeout: The connection timeout in milliseconds :type timeout: int or long From 82f3d4ed11396bff254028136c0e279d2be3af5b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 23 Jun 2014 14:43:07 -0700 Subject: [PATCH 0761/3726] fixing docs --- cqlengine/connection.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index af1afd7b5a..b080792c93 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -37,15 +37,12 @@ def setup( """ Records the hosts and connects to one of them - :param hosts: list of hosts, strings in the :, or just + :param hosts: list of hosts, see http://datastax.github.io/python-driver/api/cassandra/cluster.html :type hosts: list :param default_keyspace: The default keyspace to use :type default_keyspace: str :param consistency: The global consistency level :type 
consistency: int - :param timeout: The connection timeout in milliseconds - :type timeout: int or long - """ global cluster, session, default_consistency_level From 41b4cf10aec623ca244a4c173d83465f88a6ac49 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 23 Jun 2014 14:45:51 -0700 Subject: [PATCH 0762/3726] updated documentation --- docs/index.rst | 5 ++++- docs/topics/connection.rst | 13 +++---------- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index d748f39fd1..82e5cb3743 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -51,7 +51,10 @@ Getting Started #next, setup the connection to your cassandra server(s)... >>> from cqlengine import connection - >>> connection.setup(['127.0.0.1:9160']) + + # see http://datastax.github.io/python-driver/api/cassandra/cluster.html for options + # the list of hosts will be passed to create a Cluster() instance + >>> connection.setup(['127.0.0.1']) #...and create your CQL table >>> from cqlengine.management import sync_table diff --git a/docs/topics/connection.rst b/docs/topics/connection.rst index 53eaeafad6..11eeda39e0 100644 --- a/docs/topics/connection.rst +++ b/docs/topics/connection.rst @@ -11,22 +11,15 @@ Connection The setup function in `cqlengine.connection` records the Cassandra servers to connect to. If there is a problem with one of the servers, cqlengine will try to connect to each of the other connections before failing. -.. function:: setup(hosts [, username=None, password=None, consistency='ONE']) +.. 
function:: setup(hosts) :param hosts: list of hosts, strings in the :, or just :type hosts: list - :param username: a username, if required - :type username: str - - :param password: a password, if required - :type password: str - :param consistency: the consistency level of the connection, defaults to 'ONE' - :type consistency: str + :type consistency: int - :param timeout: the connection timeout in milliseconds - :type timeout: int or long + # see http://datastax.github.io/python-driver/api/cassandra.html#cassandra.ConsistencyLevel Records the hosts and connects to one of them From 877a89d09a42872b28539dc59fa575079c659a4b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 23 Jun 2014 15:03:26 -0700 Subject: [PATCH 0763/3726] adding protocol version as option in tests. need to get 1.2 working properly --- cqlengine/tests/base.py | 4 +++- docs/index.rst | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index c8679c1b21..3250de1471 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -9,7 +9,9 @@ else: CASSANDRA_TEST_HOST = 'localhost' -connection.setup([CASSANDRA_TEST_HOST], default_keyspace='cqlengine_test') +protocol_version = int(os.environ.get("CASSANDRA_PROTOCOL_VERSION", 2)) + +connection.setup([CASSANDRA_TEST_HOST], protocol_version=protocol_version, default_keyspace='cqlengine_test') class BaseCassEngTestCase(TestCase): diff --git a/docs/index.rst b/docs/index.rst index 82e5cb3743..e9663bc85d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -56,6 +56,9 @@ Getting Started # the list of hosts will be passed to create a Cluster() instance >>> connection.setup(['127.0.0.1']) + # if you're connecting to a 1.2 cluster + >>> connection.setup(['127.0.0.1'], protocol_version=1) + #...and create your CQL table >>> from cqlengine.management import sync_table >>> sync_table(ExampleModel) From e2e144053083770ad67422f6d7b3f35f2c1ede35 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: 
Mon, 23 Jun 2014 15:26:32 -0700 Subject: [PATCH 0764/3726] fixed sync issue with 1.2 --- cqlengine/management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 2eeaa310ef..54128ceb23 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -232,7 +232,7 @@ def get_fields(model): try: return [Field(x['column_name'], x['validator']) for x in tmp if x['type'] == 'regular'] - except ValueError: + except KeyError: return [Field(x['column_name'], x['validator']) for x in tmp] # convert to Field named tuples From e38d02bbb31c0bb8a018740c4b9fa89979a4aff2 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 23 Jun 2014 16:24:27 -0700 Subject: [PATCH 0765/3726] fixed readme --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b99b182834..7965610d56 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,10 @@ class ExampleModel(Model): #next, setup the connection to your cassandra server(s)... 
>>> from cqlengine import connection ->>> connection.setup(['127.0.0.1:9160']) +>>> connection.setup(['127.0.0.1']) + +# or if you're still on cassandra 1.2 +>>> connection.setup(['127.0.0.1'], protocol_version=1) #...and create your CQL table >>> from cqlengine.management import sync_table From 6bb41fca2eb296007cf016824dfc616ee013eab9 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 24 Jun 2014 13:36:56 -0700 Subject: [PATCH 0766/3726] version bump --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index a803cc227f..a551051694 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.14.0 +0.15.0 From 19b8e84caea8c47bfd1a70f4fd9377afe5b75184 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 25 Jun 2014 07:37:32 -0700 Subject: [PATCH 0767/3726] fixed default keyspace --- cqlengine/connection.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index b080792c93..bd000bd0cc 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -49,6 +49,10 @@ def setup( if 'username' in kwargs or 'password' in kwargs: raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider") + if default_keyspace: + from cqlengine import models + models.DEFAULT_KEYSPACE = default_keyspace + default_consistency_level = consistency cluster = Cluster(hosts, **kwargs) session = cluster.connect() From 130f554e49a068d66822c9f1dd93e6a06b82d09e Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 25 Jun 2014 07:37:52 -0700 Subject: [PATCH 0768/3726] version bump --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index a551051694..e815b861f0 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.15.0 +0.15.1 From af9abb4cb69277979efc34e1295104e4ab49661d Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 25 Jun 
2014 10:01:39 -0700 Subject: [PATCH 0769/3726] added old cql package back in, will need to be removed in next release --- cqlengine/VERSION | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index e815b861f0..4312e0d0ca 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.15.1 +0.15.2 diff --git a/setup.py b/setup.py index a427f36bee..2d6740db31 100644 --- a/setup.py +++ b/setup.py @@ -31,7 +31,7 @@ "Topic :: Software Development :: Libraries :: Python Modules", ], keywords='cassandra,cql,orm', - install_requires = ['cassandra-driver >= 2.0.0'], + install_requires = ['cassandra-driver >= 2.0.0', "cql"], author='Blake Eggleston, Jon Haddad', author_email='bdeggleston@gmail.com', url='https://github.com/cqlengine/cqlengine', From c74434370b563b5de1558bb0fd437851b0cea1ad Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 25 Jun 2014 10:50:20 -0700 Subject: [PATCH 0770/3726] removed dead code and tests --- cqlengine/columns.py | 182 ------------------ .../tests/columns/test_container_columns.py | 80 -------- 2 files changed, 262 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index c33304bace..e3a32b70e8 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -315,17 +315,6 @@ def __init__(self, required=required, ) - def get_update_statement(self, val, prev, ctx): - val = self.to_database(val) - prev = self.to_database(prev or 0) - field_id = uuid4().hex - - delta = val - prev - sign = '-' if delta < 0 else '+' - delta = abs(delta) - ctx[field_id] = delta - return ['"{0}" = "{0}" {1} {2}'.format(self.db_field_name, sign, delta)] - class DateTime(Column): db_type = 'timestamp' @@ -544,11 +533,6 @@ def get_column_def(self): db_type = self.db_type.format(self.value_type.db_type) return '{} {}'.format(self.cql, db_type) - def get_update_statement(self, val, prev, ctx): - """ - Used to add partial update statements - """ - raise NotImplementedError def 
_val_is_null(self, val): return not val @@ -609,48 +593,7 @@ def to_database(self, value): if isinstance(value, self.Quoter): return value return self.Quoter({self.value_col.to_database(v) for v in value}) - def get_update_statement(self, val, prev, ctx): - """ - Returns statements that will be added to an object's update statement - also updates the query context - :param val: the current column value - :param prev: the previous column value - :param ctx: the values that will be passed to the query - :rtype: list - """ - - # remove from Quoter containers, if applicable - val = self.to_database(val) - prev = self.to_database(prev) - if isinstance(val, self.Quoter): val = val.value - if isinstance(prev, self.Quoter): prev = prev.value - - if val is None or val == prev: - # don't return anything if the new value is the same as - # the old one, or if the new value is none - return [] - elif prev is None or not any({v in prev for v in val}): - field = uuid1().hex - ctx[field] = self.Quoter(val) - return ['"{}" = :{}'.format(self.db_field_name, field)] - else: - # partial update time - to_create = val - prev - to_delete = prev - val - statements = [] - - if to_create: - field_id = uuid1().hex - ctx[field_id] = self.Quoter(to_create) - statements += ['"{0}" = "{0}" + :{1}'.format(self.db_field_name, field_id)] - - if to_delete: - field_id = uuid1().hex - ctx[field_id] = self.Quoter(to_delete) - statements += ['"{0}" = "{0}" - :{1}'.format(self.db_field_name, field_id)] - - return statements class List(BaseContainerColumn): @@ -691,82 +634,6 @@ def to_database(self, value): if isinstance(value, self.Quoter): return value return self.Quoter([self.value_col.to_database(v) for v in value]) - def get_update_statement(self, val, prev, values): - """ - Returns statements that will be added to an object's update statement - also updates the query context - """ - # remove from Quoter containers, if applicable - val = self.to_database(val) - prev = self.to_database(prev) - if 
isinstance(val, self.Quoter): val = val.value - if isinstance(prev, self.Quoter): prev = prev.value - - def _insert(): - field_id = uuid1().hex - values[field_id] = self.Quoter(val) - return ['"{}" = :{}'.format(self.db_field_name, field_id)] - - if val is None or val == prev: - return [] - - elif prev is None: - return _insert() - - elif len(val) < len(prev): - # if elements have been removed, - # rewrite the whole list - return _insert() - - elif len(prev) == 0: - # if we're updating from an empty - # list, do a complete insert - return _insert() - - else: - # the prepend and append lists, - # if both of these are still None after looking - # at both lists, an insert statement will be returned - prepend = None - append = None - - # the max start idx we want to compare - search_space = len(val) - max(0, len(prev)-1) - - # the size of the sub lists we want to look at - search_size = len(prev) - - for i in range(search_space): - #slice boundary - j = i + search_size - sub = val[i:j] - idx_cmp = lambda idx: prev[idx] == sub[idx] - if idx_cmp(0) and idx_cmp(-1) and prev == sub: - prepend = val[:i] - append = val[j:] - break - - # create update statements - if prepend is append is None: - return _insert() - - statements = [] - if prepend: - field_id = uuid1().hex - # CQL seems to prepend element at a time, starting - # with the element at idx 0, we can either reverse - # it here, or have it inserted in reverse - prepend.reverse() - values[field_id] = self.Quoter(prepend) - statements += ['"{0}" = :{1} + "{0}"'.format(self.db_field_name, field_id)] - - if append: - field_id = uuid1().hex - values[field_id] = self.Quoter(append) - statements += ['"{0}" = "{0}" + :{1}'.format(self.db_field_name, field_id)] - - return statements - class Map(BaseContainerColumn): """ @@ -841,55 +708,6 @@ def to_database(self, value): if isinstance(value, self.Quoter): return value return self.Quoter({self.key_col.to_database(k):self.value_col.to_database(v) for k,v in value.items()}) - def 
get_update_statement(self, val, prev, ctx): - """ - http://www.datastax.com/docs/1.2/cql_cli/using/collections_map#deletion - """ - # remove from Quoter containers, if applicable - val = self.to_database(val) - prev = self.to_database(prev) - if isinstance(val, self.Quoter): val = val.value - if isinstance(prev, self.Quoter): prev = prev.value - val = val or {} - prev = prev or {} - - #get the updated map - update = {k:v for k,v in val.items() if v != prev.get(k)} - - statements = [] - for k,v in update.items(): - key_id = uuid1().hex - val_id = uuid1().hex - ctx[key_id] = k - ctx[val_id] = v - statements += ['"{}"[:{}] = :{}'.format(self.db_field_name, key_id, val_id)] - - return statements - - def get_delete_statement(self, val, prev, ctx): - """ - Returns statements that will be added to an object's delete statement - also updates the query context, used for removing keys from a map - """ - if val is prev is None: - return [] - - val = self.to_database(val) - prev = self.to_database(prev) - if isinstance(val, self.Quoter): val = val.value - if isinstance(prev, self.Quoter): prev = prev.value - - old_keys = set(prev.keys()) if prev else set() - new_keys = set(val.keys()) if val else set() - del_keys = old_keys - new_keys - - del_statements = [] - for key in del_keys: - field_id = uuid1().hex - ctx[field_id] = key - del_statements += ['"{}"[:{}]'.format(self.db_field_name, field_id)] - - return del_statements class _PartitionKeysToken(Column): diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 838f2eb298..86a96e7edb 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -124,46 +124,6 @@ def test_partial_updates(self): m2 = TestSetModel.get(partition=m1.partition) assert m2.int_set == {2, 3, 4, 5} - def test_partial_update_creation(self): - """ - Tests that proper update statements are created for a partial set update - :return: 
- """ - ctx = {} - col = columns.Set(columns.Integer, db_field="TEST") - statements = col.get_update_statement({1, 2, 3, 4}, {2, 3, 4, 5}, ctx) - - assert len([v for v in ctx.values() if {1} == v.value]) == 1 - assert len([v for v in ctx.values() if {5} == v.value]) == 1 - assert len([s for s in statements if '"TEST" = "TEST" -' in s]) == 1 - assert len([s for s in statements if '"TEST" = "TEST" +' in s]) == 1 - - def test_update_from_none(self): - """ Tests that updating a 'None' list creates a straight insert statement """ - ctx = {} - col = columns.Set(columns.Integer, db_field="TEST") - statements = col.get_update_statement({1, 2, 3, 4}, None, ctx) - - #only one variable /statement should be generated - assert len(ctx) == 1 - assert len(statements) == 1 - - assert ctx.values()[0].value == {1, 2, 3, 4} - assert statements[0] == '"TEST" = :{}'.format(ctx.keys()[0]) - - def test_update_from_empty(self): - """ Tests that updating an empty list creates a straight insert statement """ - ctx = {} - col = columns.Set(columns.Integer, db_field="TEST") - statements = col.get_update_statement({1, 2, 3, 4}, set(), ctx) - - #only one variable /statement should be generated - assert len(ctx) == 1 - assert len(statements) == 1 - - assert ctx.values()[0].value == {1, 2, 3, 4} - assert statements[0] == '"TEST" = :{}'.format(ctx.keys()[0]) - def test_instantiation_with_column_class(self): """ Tests that columns instantiated with a column class work properly @@ -273,46 +233,6 @@ def test_partial_updates(self): m2 = TestListModel.get(partition=m1.partition) assert list(m2.int_list) == final - def test_partial_update_creation(self): - """ Tests that proper update statements are created for a partial list update """ - final = range(10) - initial = final[3:7] - - ctx = {} - col = columns.List(columns.Integer, db_field="TEST") - statements = col.get_update_statement(final, initial, ctx) - - assert len([v for v in ctx.values() if [2, 1, 0] == v.value]) == 1 - assert len([v for v in 
ctx.values() if [7, 8, 9] == v.value]) == 1 - assert len([s for s in statements if '"TEST" = "TEST" +' in s]) == 1 - assert len([s for s in statements if '+ "TEST"' in s]) == 1 - - def test_update_from_none(self): - """ Tests that updating an 'None' list creates a straight insert statement """ - ctx = {} - col = columns.List(columns.Integer, db_field="TEST") - statements = col.get_update_statement([1, 2, 3], None, ctx) - - #only one variable /statement should be generated - assert len(ctx) == 1 - assert len(statements) == 1 - - assert ctx.values()[0].value == [1, 2, 3] - assert statements[0] == '"TEST" = :{}'.format(ctx.keys()[0]) - - def test_update_from_empty(self): - """ Tests that updating an empty list creates a straight insert statement """ - ctx = {} - col = columns.List(columns.Integer, db_field="TEST") - statements = col.get_update_statement([1, 2, 3], [], ctx) - - #only one variable /statement should be generated - assert len(ctx) == 1 - assert len(statements) == 1 - - assert ctx.values()[0].value == [1, 2, 3] - assert statements[0] == '"TEST" = :{}'.format(ctx.keys()[0]) - def test_instantiation_with_column_class(self): """ Tests that columns instantiated with a column class work properly From a8a45c0fa6471e506c4fa842cbeaa4535fb64b44 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 25 Jun 2014 11:11:33 -0700 Subject: [PATCH 0771/3726] fixed boolean defaults --- cqlengine/columns.py | 1 + cqlengine/tests/columns/test_validation.py | 22 ++++++++++++++++++++-- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index e3a32b70e8..e211a4478a 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -442,6 +442,7 @@ class Boolean(Column): def validate(self, value): """ Always returns a Python boolean. 
""" + value = super(Boolean, self).validate(value) return bool(value) def to_python(self, value): diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index ceaa9371aa..878186d22e 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -78,6 +78,24 @@ def test_datetime_none(self): assert dts[0][0] is None +class TestBoolDefault(BaseCassEngTestCase): + class BoolDefaultValueTest(Model): + test_id = Integer(primary_key=True) + stuff = Boolean(default=True) + + @classmethod + def setUpClass(cls): + super(TestBoolDefault, cls).setUpClass() + sync_table(cls.BoolDefaultValueTest) + + def test_default_is_set(self): + tmp = self.BoolDefaultValueTest.create(test_id=1) + self.assertEqual(True, tmp.stuff) + tmp2 = self.BoolDefaultValueTest.get(test_id=1) + self.assertEqual(True, tmp2.stuff) + + + class TestVarInt(BaseCassEngTestCase): class VarIntTest(Model): test_id = Integer(primary_key=True) @@ -86,12 +104,12 @@ class VarIntTest(Model): @classmethod def setUpClass(cls): super(TestVarInt, cls).setUpClass() - create_table(cls.VarIntTest) + sync_table(cls.VarIntTest) @classmethod def tearDownClass(cls): super(TestVarInt, cls).tearDownClass() - delete_table(cls.VarIntTest) + sync_table(cls.VarIntTest) def test_varint_io(self): long_int = sys.maxint + 1 From cb335d9be01f3ce3a4f99bfcf031d1da9cbfac3d Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 25 Jun 2014 11:11:47 -0700 Subject: [PATCH 0772/3726] version bump --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 4312e0d0ca..1985d91413 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.15.2 +0.15.3 From 5d75a339a96a324e09e84299fe869c5e8f1c3aaa Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 25 Jun 2014 12:08:37 -0700 Subject: [PATCH 0773/3726] using native driver libs for decoding --- cqlengine/columns.py | 5 ++--- 1 file 
changed, 2 insertions(+), 3 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index e211a4478a..66286d38e8 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -3,11 +3,10 @@ from datetime import datetime from datetime import date import re -from uuid import uuid1, uuid4 -from cql.query import cql_quote -from cql.cqltypes import DateType +from cassandra.cqltypes import DateType from cqlengine.exceptions import ValidationError +from cassandra.encoder import cql_quote class BaseValueManager(object): From 469799385e3fa8f820506feac651697b09f21590 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 25 Jun 2014 12:10:52 -0700 Subject: [PATCH 0774/3726] used quoter in native driver --- cqlengine/statements.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index ac4f281d6f..dd053d7c0f 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -36,7 +36,7 @@ def __str__(self): class InQuoter(ValueQuoter): def __unicode__(self): - from cql.query import cql_quote + from cassandra.encoder import cql_quote return '(' + ', '.join([cql_quote(v) for v in self.value]) + ')' From 65975ccf77abdf5ce0e6b2f07362080caf724bd4 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 25 Jun 2014 12:11:30 -0700 Subject: [PATCH 0775/3726] version bump, removed old cql --- cqlengine/VERSION | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 1985d91413..7ffdfa1cad 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.15.3 +0.15.4 diff --git a/setup.py b/setup.py index 2d6740db31..a427f36bee 100644 --- a/setup.py +++ b/setup.py @@ -31,7 +31,7 @@ "Topic :: Software Development :: Libraries :: Python Modules", ], keywords='cassandra,cql,orm', - install_requires = ['cassandra-driver >= 2.0.0', "cql"], + install_requires = ['cassandra-driver >= 2.0.0'], author='Blake Eggleston, Jon Haddad', 
author_email='bdeggleston@gmail.com', url='https://github.com/cqlengine/cqlengine', From 380ddc5a03b9e086a2edabf6b8f5399c809c71e2 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 25 Jun 2014 12:12:44 -0700 Subject: [PATCH 0776/3726] removed last cql.query reference --- cqlengine/statements.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index dd053d7c0f..1619da7f7d 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -13,7 +13,7 @@ def __init__(self, value): self.value = value def __unicode__(self): - from cql.query import cql_quote + from cassandra.encoder import cql_quote if isinstance(self.value, bool): return 'true' if self.value else 'false' elif isinstance(self.value, (list, tuple)): From 3fa7148dea870adeee511feabd9de88ace715262 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 25 Jun 2014 12:13:03 -0700 Subject: [PATCH 0777/3726] removed last instance of cql.quote --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 7ffdfa1cad..1282fff53b 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.15.4 +0.15.5 From 2c58106e87e0aa325e456f1bb8eefbca4666d677 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 26 Jun 2014 16:18:00 -0700 Subject: [PATCH 0778/3726] LIMIT can be used in count queries --- cqlengine/statements.py | 2 +- cqlengine/tests/query/test_queryset.py | 3 ++- cqlengine/tests/statements/test_select_statement.py | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 1619da7f7d..18434d230e 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -549,7 +549,7 @@ def __unicode__(self): if self.order_by and not self.count: qs += ['ORDER BY {}'.format(', '.join(unicode(o) for o in self.order_by))] - if self.limit and not self.count: + if self.limit: qs += ['LIMIT 
{}'.format(self.limit)] if self.allow_filtering: diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 97c5c4e8b0..3058d375f8 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -239,7 +239,8 @@ def test_query_limit_count(self): assert TestModel.objects.count() == 12 q = TestModel.objects(TestModel.test_id == 0).limit(2) - assert q.count() == 2 + result = q.count() + self.assertEqual(2, result) def test_iteration(self): """ Tests that iterating over a query set pulls back all of the expected results """ diff --git a/cqlengine/tests/statements/test_select_statement.py b/cqlengine/tests/statements/test_select_statement.py index 3afe34e2d9..9c466485a5 100644 --- a/cqlengine/tests/statements/test_select_statement.py +++ b/cqlengine/tests/statements/test_select_statement.py @@ -35,8 +35,8 @@ def test_where_clause_rendering(self): def test_count(self): ss = SelectStatement('table', count=True, limit=10, order_by='d') ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) - self.assertEqual(unicode(ss), 'SELECT COUNT(*) FROM table WHERE "a" = %(0)s', unicode(ss)) - self.assertNotIn('LIMIT', unicode(ss)) + self.assertEqual(unicode(ss), 'SELECT COUNT(*) FROM table WHERE "a" = %(0)s LIMIT 10', unicode(ss)) + self.assertIn('LIMIT', unicode(ss)) self.assertNotIn('ORDER', unicode(ss)) def test_context(self): From d6ae786bc56e5669356e0a180462a92a6d569241 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 26 Jun 2014 16:30:21 -0700 Subject: [PATCH 0779/3726] removed default cqlengine keyspace #217 --- cqlengine/connection.py | 2 +- cqlengine/models.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index bd000bd0cc..1b4f6b0a2b 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -31,7 +31,7 @@ class CQLConnectionError(CQLEngineException): pass def setup( hosts, - default_keyspace=None, + 
default_keyspace, consistency=ConsistencyLevel.ONE, **kwargs): """ diff --git a/cqlengine/models.py b/cqlengine/models.py index 028ab658e6..4697cd8fa5 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -11,7 +11,7 @@ class ModelDefinitionException(ModelException): pass class PolyMorphicModelException(ModelException): pass -DEFAULT_KEYSPACE = 'cqlengine' +DEFAULT_KEYSPACE = None class hybrid_classmethod(object): From 36c98d3f9502f42a7060140b5a22685356a9b240 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 26 Jun 2014 16:35:44 -0700 Subject: [PATCH 0780/3726] make sure setup before querying --- cqlengine/connection.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 1b4f6b0a2b..988d5fd271 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -60,6 +60,9 @@ def setup( def execute(query, params=None, consistency_level=None): + if not session: + raise CQLEngineException("It is required to setup() cqlengine before executing queries") + if consistency_level is None: consistency_level = default_consistency_level @@ -75,6 +78,7 @@ def execute(query, params=None, consistency_level=None): query = SimpleStatement(query, consistency_level=consistency_level) + params = params or {} result = session.execute(query, params) From 96d91793eb8a77505b7608ee0b1253e28196c74e Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 26 Jun 2014 17:42:14 -0700 Subject: [PATCH 0781/3726] not allowing non-models to be passed to sync_table --- cqlengine/connection.py | 2 +- cqlengine/management.py | 6 +++++- cqlengine/tests/management/test_management.py | 10 +++++++++- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 988d5fd271..da86e60053 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -62,7 +62,7 @@ def execute(query, params=None, consistency_level=None): if not session: raise CQLEngineException("It is required to 
setup() cqlengine before executing queries") - + if consistency_level is None: consistency_level = default_consistency_level diff --git a/cqlengine/management.py b/cqlengine/management.py index 54128ceb23..a47e9e9275 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -12,7 +12,7 @@ Field = namedtuple('Field', ['name', 'type']) logger = logging.getLogger(__name__) - +from cqlengine.models import Model # system keyspaces schema_columnfamilies = NamedTable('system', 'schema_columnfamilies') @@ -74,9 +74,13 @@ def sync_table(model, create_missing_keyspace=True): :type create_missing_keyspace: bool """ + if not issubclass(model, Model): + raise CQLEngineException("Models must be derived from base Model.") + if model.__abstract__: raise CQLEngineException("cannot create table from abstract model") + #construct query string cf_name = model.column_family_name() raw_cf_name = model.column_family_name(include_keyspace=False) diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 67e77a356f..b7c492db89 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -1,4 +1,4 @@ - +from cqlengine.exceptions import CQLEngineException from cqlengine.management import get_fields, sync_table, drop_table from cqlengine.tests.base import BaseCassEngTestCase from cqlengine import management @@ -140,3 +140,11 @@ def test_sync_table_works_with_primary_keys_only_tables(self): table_settings = management.get_table_settings(PrimaryKeysOnlyModel) assert SizeTieredCompactionStrategy in table_settings.options['compaction_strategy_class'] + +class NonModelFailureTest(BaseCassEngTestCase): + class FakeModel(object): + pass + + def test_failure(self): + with self.assertRaises(CQLEngineException): + sync_table(self.FakeModel) From e824a239362f861636055ff85e16407054922e29 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 26 Jun 2014 19:37:14 -0700 Subject: [PATCH 
0782/3726] added faq, needs formatting --- cqlengine/statements.py | 4 ++++ docs/topics/faq.rst | 7 +++++++ 2 files changed, 11 insertions(+) create mode 100644 docs/topics/faq.rst diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 18434d230e..549b529df7 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -523,6 +523,10 @@ def __init__(self, limit=None, allow_filtering=False): + """ + :param where + :type where list of cqlengine.statements.WhereClause + """ super(SelectStatement, self).__init__( table, consistency=consistency, diff --git a/docs/topics/faq.rst b/docs/topics/faq.rst new file mode 100644 index 0000000000..f0f98e4526 --- /dev/null +++ b/docs/topics/faq.rst @@ -0,0 +1,7 @@ +========================== +Frequently Asked Questions +========================== + +Q: Why does calling my Model(field=blah, field2=blah2) not work? + +A: The __init__() of a model is used by cqlengine internally. If you want to create a new row in the database, use create(). From 3d036f2e0ca562fdef07df1cceedbec595c71fa8 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 26 Jun 2014 19:40:04 -0700 Subject: [PATCH 0783/3726] formatting for faq --- docs/index.rst | 1 + docs/topics/faq.rst | 1 + 2 files changed, 2 insertions(+) diff --git a/docs/index.rst b/docs/index.rst index e9663bc85d..99a08efc3a 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -31,6 +31,7 @@ Contents: topics/columns topics/connection topics/manage_schemas + topics/faq .. _getting-started: diff --git a/docs/topics/faq.rst b/docs/topics/faq.rst index f0f98e4526..0b58b6358e 100644 --- a/docs/topics/faq.rst +++ b/docs/topics/faq.rst @@ -3,5 +3,6 @@ Frequently Asked Questions ========================== Q: Why does calling my Model(field=blah, field2=blah2) not work? +------------------------------------------------------------------- A: The __init__() of a model is used by cqlengine internally. If you want to create a new row in the database, use create(). 
From 0d3dbbed6c93b18d7e655cd1fdb51ac3f726d267 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 26 Jun 2014 19:57:08 -0700 Subject: [PATCH 0784/3726] document named table --- docs/topics/queryset.rst | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index a8dca85356..e1e172645a 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -227,7 +227,7 @@ Token Function last = first_page[-1] next_page = list(query.filter(pk__token__gt=cqlengine.Token(last.pk))) -QuerySets are imutable +QuerySets are immutable ====================== When calling any method that changes a queryset, the method does not actually change the queryset object it's called on, but returns a new queryset object with the attributes of the original queryset, plus the attributes added in the method call. @@ -505,3 +505,23 @@ QuerySet method reference # add items to a map Row.objects(row_id=5).update(map_column__update={1: 2, 3: 4}) + + +Named Tables +=================== + +Named tables are a way of querying a table without creating an class. They're useful for querying system tables or exploring an unfamiliar database. + + + .. 
code-block:: python + + from cqlengine.connection import setup + setup("127.0.0.1", "cqlengine_test") + + from cqlengine.named import NamedTable + user = NamedTable("cqlengine_test", "user") + user.objects() + user.objects()[0] + + # {u'pk': 1, u't': datetime.datetime(2014, 6, 26, 17, 10, 31, 774000)} + From 1ccc7e64fb1676cf9ec2e76ec96473f77879b3b0 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 26 Jun 2014 21:23:05 -0700 Subject: [PATCH 0785/3726] load test --- cqlengine/tests/test_load.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 cqlengine/tests/test_load.py diff --git a/cqlengine/tests/test_load.py b/cqlengine/tests/test_load.py new file mode 100644 index 0000000000..16a42eac88 --- /dev/null +++ b/cqlengine/tests/test_load.py @@ -0,0 +1,33 @@ +import os +from unittest import TestCase, skipUnless + +from cqlengine import Model, Integer +from cqlengine.management import sync_table +from cqlengine.tests import base +import resource +import gc + +class LoadTest(Model): + k = Integer(primary_key=True) + v = Integer() + + +@skipUnless("LOADTEST" in os.environ, "LOADTEST not on") +def test_lots_of_queries(): + sync_table(LoadTest) + import objgraph + gc.collect() + objgraph.show_most_common_types() + + print "Starting..." 
+ + for i in range(1000000): + if i % 25000 == 0: + # print memory statistic + print "Memory usage: %s" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) + + LoadTest.create(k=i, v=i) + + objgraph.show_most_common_types() + + raise Exception("you shouldn't be here") From 5a6f9c89ad69f70ce27e1331fc3e8976d83e1323 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 27 Jun 2014 11:04:01 -0700 Subject: [PATCH 0786/3726] fixed issue #162, allows non equality filtering with allow_filtering --- cqlengine/query.py | 2 +- cqlengine/tests/query/test_queryset.py | 26 +++++++++++++++++++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 320084ef00..ee776cd227 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -631,7 +631,7 @@ def _validate_select_where(self): #check that there's either a = or IN relationship with a primary key or indexed field equal_ops = [self.model._columns.get(w.field) for w in self._where if isinstance(w.operator, EqualsOperator)] token_comparison = any([w for w in self._where if isinstance(w.value, Token)]) - if not any([w.primary_key or w.index for w in equal_ops]) and not token_comparison: + if not any([w.primary_key or w.index for w in equal_ops]) and not token_comparison and not self._allow_filtering: raise QueryException('Where clauses require either a "=" or "IN" comparison with either a primary key or indexed field') if not self._allow_filtering: diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 3058d375f8..700eae3d46 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -1,12 +1,14 @@ from datetime import datetime import time +from unittest import TestCase from uuid import uuid1, uuid4 +import uuid from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.exceptions import ModelException from cqlengine import functions -from cqlengine.management import create_table, 
drop_table +from cqlengine.management import create_table, drop_table, sync_table from cqlengine.management import delete_table from cqlengine.models import Model from cqlengine import columns @@ -369,6 +371,28 @@ def test_allow_filtering_flag(self): """ +def test_non_quality_filtering(): + class NonEqualityFilteringModel(Model): + example_id = columns.UUID(primary_key=True, default=uuid.uuid4) + sequence_id = columns.Integer(primary_key=True) # sequence_id is a clustering key + example_type = columns.Integer(index=True) + created_at = columns.DateTime() + + drop_table(NonEqualityFilteringModel) + sync_table(NonEqualityFilteringModel) + + # setup table, etc. + + NonEqualityFilteringModel.create(sequence_id=1, example_type=0, created_at=datetime.now()) + NonEqualityFilteringModel.create(sequence_id=3, example_type=0, created_at=datetime.now()) + NonEqualityFilteringModel.create(sequence_id=5, example_type=1, created_at=datetime.now()) + + qA = NonEqualityFilteringModel.objects(NonEqualityFilteringModel.sequence_id > 3).allow_filtering() + num = qA.count() + assert num == 1, num + + + class TestQuerySetOrdering(BaseQuerySetUsage): def test_order_by_success_case(self): From 8e18ab025029f20afb8549d6a7f2a96a4e1a0da0 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 27 Jun 2014 12:50:38 -0700 Subject: [PATCH 0787/3726] fixed changelog --- changelog | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/changelog b/changelog index f967fec06a..e30733c62b 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,14 @@ CHANGELOG +0.16.0 + +* Check number of elements in collections. +* Add support for table properties. + + +0.15.0 +* native driver integration + 0.14.0 * fix for setting map to empty (Lifto) @@ -7,11 +16,6 @@ CHANGELOG * use stable version of sure package (maedhroz) * performance improvements -<<<<<<< HEAD -======= -* Check number of elements in collections. -* Add support for table properties. 
->>>>>>> 3c56fec1d8d79d6037d3b83703608d4889994012 0.13.0 * adding support for post batch callbacks (thanks Daniel Dotsenko github.com/dvdotsenko) From 36c3ae905b3f5c79e2a458d6593c782344797c1a Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 27 Jun 2014 13:36:25 -0700 Subject: [PATCH 0788/3726] disallow None as a value in a filter #172 --- cqlengine/models.py | 3 ++ cqlengine/query.py | 3 ++ cqlengine/tests/model/test_model_io.py | 58 ++++++++++++++++---------- 3 files changed, 43 insertions(+), 21 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 871fe401c1..b7f80f60eb 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -500,6 +500,9 @@ def all(cls): @classmethod def filter(cls, *args, **kwargs): + # if kwargs.values().count(None): + # raise CQLEngineException("Cannot pass None as a filter") + return cls.objects.filter(*args, **kwargs) @classmethod diff --git a/cqlengine/query.py b/cqlengine/query.py index ee776cd227..0a64a5c78a 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -405,6 +405,9 @@ def filter(self, *args, **kwargs): :rtype: AbstractQuerySet """ #add arguments to the where clause filters + if kwargs.values().count(None): + raise CQLEngineException("None values on filter are not allowed") + clone = copy.deepcopy(self) for operator in args: if not isinstance(operator, WhereClause): diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index 7c26bae4a3..e114d75a86 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -2,10 +2,11 @@ import random from datetime import date from operator import itemgetter +from cqlengine.exceptions import CQLEngineException from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.management import create_table -from cqlengine.management import delete_table +from cqlengine.management import sync_table +from cqlengine.management import drop_table from cqlengine.models import Model from 
cqlengine import columns @@ -27,12 +28,12 @@ class TestModelIO(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(TestModelIO, cls).setUpClass() - create_table(TestModel) + sync_table(TestModel) @classmethod def tearDownClass(cls): super(TestModelIO, cls).tearDownClass() - delete_table(TestModel) + drop_table(TestModel) def test_model_save_and_load(self): """ @@ -109,8 +110,8 @@ def test_column_deleting_works_properly(self): def test_a_sensical_error_is_raised_if_you_try_to_create_a_table_twice(self): """ """ - create_table(TestModel) - create_table(TestModel) + sync_table(TestModel) + sync_table(TestModel) class TestMultiKeyModel(Model): @@ -125,13 +126,13 @@ class TestDeleting(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(TestDeleting, cls).setUpClass() - delete_table(TestMultiKeyModel) - create_table(TestMultiKeyModel) + drop_table(TestMultiKeyModel) + sync_table(TestMultiKeyModel) @classmethod def tearDownClass(cls): super(TestDeleting, cls).tearDownClass() - delete_table(TestMultiKeyModel) + drop_table(TestMultiKeyModel) def test_deleting_only_deletes_one_object(self): partition = random.randint(0,1000) @@ -152,13 +153,13 @@ class TestUpdating(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(TestUpdating, cls).setUpClass() - delete_table(TestMultiKeyModel) - create_table(TestMultiKeyModel) + drop_table(TestMultiKeyModel) + sync_table(TestMultiKeyModel) @classmethod def tearDownClass(cls): super(TestUpdating, cls).tearDownClass() - delete_table(TestMultiKeyModel) + drop_table(TestMultiKeyModel) def setUp(self): super(TestUpdating, self).setUp() @@ -201,13 +202,13 @@ class TestCanUpdate(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(TestCanUpdate, cls).setUpClass() - delete_table(TestModel) - create_table(TestModel) + drop_table(TestModel) + sync_table(TestModel) @classmethod def tearDownClass(cls): super(TestCanUpdate, cls).tearDownClass() - delete_table(TestModel) + drop_table(TestModel) def 
test_success_case(self): tm = TestModel(count=8, text='123456789') @@ -245,8 +246,8 @@ class IndexDefinitionModel(Model): class TestIndexedColumnDefinition(BaseCassEngTestCase): def test_exception_isnt_raised_if_an_index_is_defined_more_than_once(self): - create_table(IndexDefinitionModel) - create_table(IndexDefinitionModel) + sync_table(IndexDefinitionModel) + sync_table(IndexDefinitionModel) class ReservedWordModel(Model): token = columns.Text(primary_key=True) @@ -257,7 +258,7 @@ class TestQueryQuoting(BaseCassEngTestCase): def test_reserved_cql_words_can_be_used_as_column_names(self): """ """ - create_table(ReservedWordModel) + sync_table(ReservedWordModel) model1 = ReservedWordModel.create(token='1', insert=5) @@ -279,13 +280,13 @@ class TestQuerying(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(TestQuerying, cls).setUpClass() - delete_table(TestQueryModel) - create_table(TestQueryModel) + drop_table(TestQueryModel) + sync_table(TestQueryModel) @classmethod def tearDownClass(cls): super(TestQuerying, cls).tearDownClass() - delete_table(TestQueryModel) + drop_table(TestQueryModel) def test_query_with_date(self): uid = uuid4() @@ -300,3 +301,18 @@ def test_query_with_date(self): assert inst.test_id == uid assert inst.date == day + +def test_none_filter_fails(): + class NoneFilterModel(Model): + pk = columns.Integer(primary_key=True) + v = columns.Integer() + sync_table(NoneFilterModel) + + try: + NoneFilterModel.objects(pk=None) + raise Exception("fail") + except CQLEngineException as e: + pass + + + From 393e2029a69eee48d5f21a53bb06a266adf0c478 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 27 Jun 2014 13:53:47 -0700 Subject: [PATCH 0789/3726] create_table > sync_table --- cqlengine/tests/model/test_class_construction.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index 517798f2f4..4f7ee53d5e 100644 --- 
a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -262,9 +262,9 @@ def test_attempting_to_save_abstract_model_fails(self): def test_attempting_to_create_abstract_table_fails(self): """ Attempting to create a table from an abstract model should fail """ - from cqlengine.management import create_table + from cqlengine.management import sync_table with self.assertRaises(CQLEngineException): - create_table(AbstractModelWithFullCols) + sync_table(AbstractModelWithFullCols) def test_attempting_query_on_abstract_model_fails(self): """ Tests attempting to execute query with an abstract model fails """ @@ -279,8 +279,8 @@ def test_abstract_columns_are_inherited(self): def test_concrete_class_table_creation_cycle(self): """ Tests that models with inherited abstract classes can be created, and have io performed """ - from cqlengine.management import create_table, delete_table - create_table(ConcreteModelWithCol) + from cqlengine.management import sync_table, delete_table + sync_table(ConcreteModelWithCol) w1 = ConcreteModelWithCol.create(pkey=5, data=6) w2 = ConcreteModelWithCol.create(pkey=6, data=7) From 17811251f6fc22aa95fe8398c77b989cef9e3d36 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 27 Jun 2014 13:54:13 -0700 Subject: [PATCH 0790/3726] delete_table no more --- cqlengine/tests/model/test_class_construction.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index 4f7ee53d5e..c8a54eb1c2 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -279,7 +279,7 @@ def test_abstract_columns_are_inherited(self): def test_concrete_class_table_creation_cycle(self): """ Tests that models with inherited abstract classes can be created, and have io performed """ - from cqlengine.management import sync_table, delete_table + from 
cqlengine.management import sync_table, drop_table sync_table(ConcreteModelWithCol) w1 = ConcreteModelWithCol.create(pkey=5, data=6) @@ -293,7 +293,7 @@ def test_concrete_class_table_creation_cycle(self): assert w2.pkey == r2.pkey assert w2.data == r2.data - delete_table(ConcreteModelWithCol) + drop_table(ConcreteModelWithCol) class TestCustomQuerySet(BaseCassEngTestCase): From dc80fb9912eda98c2e152eae2f2e6dd8833eb5e1 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 27 Jun 2014 13:57:48 -0700 Subject: [PATCH 0791/3726] removed deprecated calls --- cqlengine/tests/columns/test_validation.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 878186d22e..749066f32d 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -24,7 +24,7 @@ from cqlengine.columns import Float from cqlengine.columns import Decimal -from cqlengine.management import create_table, delete_table, sync_table, drop_table +from cqlengine.management import sync_table, drop_table from cqlengine.models import Model import sys @@ -38,12 +38,12 @@ class DatetimeTest(Model): @classmethod def setUpClass(cls): super(TestDatetime, cls).setUpClass() - create_table(cls.DatetimeTest) + sync_table(cls.DatetimeTest) @classmethod def tearDownClass(cls): super(TestDatetime, cls).tearDownClass() - delete_table(cls.DatetimeTest) + drop_table(cls.DatetimeTest) def test_datetime_io(self): now = datetime.now() @@ -126,12 +126,12 @@ class DateTest(Model): @classmethod def setUpClass(cls): super(TestDate, cls).setUpClass() - create_table(cls.DateTest) + sync_table(cls.DateTest) @classmethod def tearDownClass(cls): super(TestDate, cls).tearDownClass() - delete_table(cls.DateTest) + drop_table(cls.DateTest) def test_date_io(self): today = date.today() @@ -164,12 +164,12 @@ class DecimalTest(Model): @classmethod def setUpClass(cls): 
super(TestDecimal, cls).setUpClass() - create_table(cls.DecimalTest) + sync_table(cls.DecimalTest) @classmethod def tearDownClass(cls): super(TestDecimal, cls).tearDownClass() - delete_table(cls.DecimalTest) + drop_table(cls.DecimalTest) def test_decimal_io(self): dt = self.DecimalTest.objects.create(test_id=0, dec_val=D('0.00')) @@ -188,12 +188,12 @@ class UUIDTest(Model): @classmethod def setUpClass(cls): super(TestUUID, cls).setUpClass() - create_table(cls.UUIDTest) + sync_table(cls.UUIDTest) @classmethod def tearDownClass(cls): super(TestUUID, cls).tearDownClass() - delete_table(cls.UUIDTest) + drop_table(cls.UUIDTest) def test_uuid_str_with_dashes(self): a_uuid = uuid4() @@ -215,12 +215,12 @@ class TimeUUIDTest(Model): @classmethod def setUpClass(cls): super(TestTimeUUID, cls).setUpClass() - create_table(cls.TimeUUIDTest) + sync_table(cls.TimeUUIDTest) @classmethod def tearDownClass(cls): super(TestTimeUUID, cls).tearDownClass() - delete_table(cls.TimeUUIDTest) + drop_table(cls.TimeUUIDTest) def test_timeuuid_io(self): """ From 2c9b49ab805a086eefcb8130c312b315d85edfff Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 27 Jun 2014 13:58:40 -0700 Subject: [PATCH 0792/3726] removing deprecated calls --- cqlengine/tests/columns/test_counter_column.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/tests/columns/test_counter_column.py b/cqlengine/tests/columns/test_counter_column.py index c260f38ed0..725b83905d 100644 --- a/cqlengine/tests/columns/test_counter_column.py +++ b/cqlengine/tests/columns/test_counter_column.py @@ -2,7 +2,7 @@ from cqlengine import Model from cqlengine import columns -from cqlengine.management import create_table, delete_table +from cqlengine.management import sync_table, drop_table from cqlengine.models import ModelDefinitionException from cqlengine.tests.base import BaseCassEngTestCase @@ -46,13 +46,13 @@ class TestCounterColumn(BaseCassEngTestCase): @classmethod def setUpClass(cls): 
super(TestCounterColumn, cls).setUpClass() - delete_table(TestCounterModel) - create_table(TestCounterModel) + drop_table(TestCounterModel) + sync_table(TestCounterModel) @classmethod def tearDownClass(cls): super(TestCounterColumn, cls).tearDownClass() - delete_table(TestCounterModel) + drop_table(TestCounterModel) def test_updates(self): """ Tests that counter updates work as intended """ From bc466895f02a47d82d84bef2cd19b11a756ea593 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 27 Jun 2014 14:00:15 -0700 Subject: [PATCH 0793/3726] removed deprecated calls --- cqlengine/tests/query/test_queryset.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 700eae3d46..ced3891261 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -8,8 +8,8 @@ from cqlengine.exceptions import ModelException from cqlengine import functions -from cqlengine.management import create_table, drop_table, sync_table -from cqlengine.management import delete_table +from cqlengine.management import sync_table, drop_table, sync_table +from cqlengine.management import drop_table from cqlengine.models import Model from cqlengine import columns from cqlengine import query @@ -174,11 +174,11 @@ class BaseQuerySetUsage(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(BaseQuerySetUsage, cls).setUpClass() - delete_table(TestModel) - delete_table(IndexedTestModel) - create_table(TestModel) - create_table(IndexedTestModel) - create_table(TestMultiClusteringModel) + drop_table(TestModel) + drop_table(IndexedTestModel) + sync_table(TestModel) + sync_table(IndexedTestModel) + sync_table(TestMultiClusteringModel) TestModel.objects.create(test_id=0, attempt_id=0, description='try1', expected_result=5, test_result=30) TestModel.objects.create(test_id=0, attempt_id=1, description='try2', expected_result=10, test_result=30) @@ 
-551,12 +551,12 @@ class TestMinMaxTimeUUIDFunctions(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(TestMinMaxTimeUUIDFunctions, cls).setUpClass() - create_table(TimeUUIDQueryModel) + sync_table(TimeUUIDQueryModel) @classmethod def tearDownClass(cls): super(TestMinMaxTimeUUIDFunctions, cls).tearDownClass() - delete_table(TimeUUIDQueryModel) + drop_table(TimeUUIDQueryModel) def test_tzaware_datetime_support(self): """Test that using timezone aware datetime instances works with the From 5e856f8bc9b8ba01c68332ba031083fa5bd9eb9f Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 27 Jun 2014 14:20:56 -0700 Subject: [PATCH 0794/3726] more deprecation removal --- cqlengine/tests/columns/test_value_io.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/tests/columns/test_value_io.py b/cqlengine/tests/columns/test_value_io.py index 121eb2580c..4b4ac7d200 100644 --- a/cqlengine/tests/columns/test_value_io.py +++ b/cqlengine/tests/columns/test_value_io.py @@ -4,8 +4,8 @@ from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.management import create_table -from cqlengine.management import delete_table +from cqlengine.management import sync_table +from cqlengine.management import drop_table from cqlengine.models import Model from cqlengine.columns import ValueQuoter from cqlengine import columns @@ -44,7 +44,7 @@ class IOTestModel(Model): pkey = cls.column(primary_key=True) data = cls.column() cls._generated_model = IOTestModel - create_table(cls._generated_model) + sync_table(cls._generated_model) #tupleify the tested values if not isinstance(cls.pkey_val, tuple): @@ -56,7 +56,7 @@ class IOTestModel(Model): def tearDownClass(cls): super(BaseColumnIOTest, cls).tearDownClass() if not cls.column: return - delete_table(cls._generated_model) + drop_table(cls._generated_model) def comparator_converter(self, val): """ If you want to convert the original value used to compare the model vales """ From 
578764a3fa5e00223539fd5a72623e8d4629e65a Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 27 Jun 2014 14:22:38 -0700 Subject: [PATCH 0795/3726] deprecation cleanup --- cqlengine/tests/model/test_equality_operations.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/tests/model/test_equality_operations.py b/cqlengine/tests/model/test_equality_operations.py index a3e592b3dd..c851e6089b 100644 --- a/cqlengine/tests/model/test_equality_operations.py +++ b/cqlengine/tests/model/test_equality_operations.py @@ -2,8 +2,8 @@ from uuid import uuid4 from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.management import create_table -from cqlengine.management import delete_table +from cqlengine.management import sync_table +from cqlengine.management import drop_table from cqlengine.models import Model from cqlengine import columns @@ -17,7 +17,7 @@ class TestEqualityOperators(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(TestEqualityOperators, cls).setUpClass() - create_table(TestModel) + sync_table(TestModel) def setUp(self): super(TestEqualityOperators, self).setUp() @@ -27,7 +27,7 @@ def setUp(self): @classmethod def tearDownClass(cls): super(TestEqualityOperators, cls).tearDownClass() - delete_table(TestModel) + drop_table(TestModel) def test_an_instance_evaluates_as_equal_to_itself(self): """ From a7c908db7b45fc7f86da291452e4b49c37ddad15 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 27 Jun 2014 14:23:21 -0700 Subject: [PATCH 0796/3726] removed cql requirement --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 62eba662b1..5973311b23 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,3 @@ -cql==1.4.0 ipython==0.13.1 ipdb==0.7 Sphinx==1.1.3 From 6d8f77c740b497c07c53c27b10455c4b4b55848a Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 27 Jun 2014 14:24:36 -0700 Subject: [PATCH 0797/3726] updated docs --- 
docs/topics/manage_schemas.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/topics/manage_schemas.rst b/docs/topics/manage_schemas.rst index 7f9c63c1d5..aafaed7596 100644 --- a/docs/topics/manage_schemas.rst +++ b/docs/topics/manage_schemas.rst @@ -26,14 +26,14 @@ Once a connection has been made to Cassandra, you can use the functions in ``cql deletes the keyspace with the given name -.. function:: create_table(model [, create_missing_keyspace=True]) - +.. function:: sync_table(model [, create_missing_keyspace=True]) + :param model: the :class:`~cqlengine.model.Model` class to make a table with :type model: :class:`~cqlengine.model.Model` :param create_missing_keyspace: *Optional* If True, the model's keyspace will be created if it does not already exist. Defaults to ``True`` :type create_missing_keyspace: bool - creates a CQL table for the given model + syncs a python model to cassandra (creates & alters) .. function:: delete_table(model) @@ -42,7 +42,7 @@ Once a connection has been made to Cassandra, you can use the functions in ``cql deletes the CQL table for the given model - + See the example at :ref:`getting-started` From bbae559c3288e9f3a80eb0e4b50d3a6364789146 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 27 Jun 2014 14:26:16 -0700 Subject: [PATCH 0798/3726] removed last of deprecated code --- cqlengine/tests/query/test_datetime_queries.py | 8 ++++---- docs/topics/manage_schemas.rst | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cqlengine/tests/query/test_datetime_queries.py b/cqlengine/tests/query/test_datetime_queries.py index 39cc0e332f..c6cb0627b7 100644 --- a/cqlengine/tests/query/test_datetime_queries.py +++ b/cqlengine/tests/query/test_datetime_queries.py @@ -4,8 +4,8 @@ from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.exceptions import ModelException -from cqlengine.management import create_table -from cqlengine.management import delete_table +from cqlengine.management 
import sync_table +from cqlengine.management import drop_table from cqlengine.models import Model from cqlengine import columns from cqlengine import query @@ -20,7 +20,7 @@ class TestDateTimeQueries(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(TestDateTimeQueries, cls).setUpClass() - create_table(DateTimeQueryTestModel) + sync_table(DateTimeQueryTestModel) cls.base_date = datetime.now() - timedelta(days=10) for x in range(7): @@ -35,7 +35,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): super(TestDateTimeQueries, cls).tearDownClass() - delete_table(DateTimeQueryTestModel) + drop_table(DateTimeQueryTestModel) def test_range_query(self): """ Tests that loading from a range of dates works properly """ diff --git a/docs/topics/manage_schemas.rst b/docs/topics/manage_schemas.rst index aafaed7596..1da5b1c9e9 100644 --- a/docs/topics/manage_schemas.rst +++ b/docs/topics/manage_schemas.rst @@ -35,7 +35,7 @@ Once a connection has been made to Cassandra, you can use the functions in ``cql syncs a python model to cassandra (creates & alters) -.. function:: delete_table(model) +.. 
function:: drop_table(model) :param model: the :class:`~cqlengine.model.Model` class to delete a column family for :type model: :class:`~cqlengine.model.Model` From 39e752cb25709eb7d4a1195e4963d8e9a7ced3bc Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 27 Jun 2014 14:30:47 -0700 Subject: [PATCH 0799/3726] #218 create_table and delete_table are fully deprecated --- cqlengine/management.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 8285c2cb85..095e41b4c5 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -59,8 +59,7 @@ def delete_keyspace(name): execute("DROP KEYSPACE {}".format(name)) def create_table(model, create_missing_keyspace=True): - warnings.warn("create_table has been deprecated in favor of sync_table and will be removed in a future release", DeprecationWarning) - sync_table(model, create_missing_keyspace) + raise CQLEngineException("create_table is deprecated, please use sync_table") def sync_table(model, create_missing_keyspace=True): """ @@ -76,7 +75,7 @@ def sync_table(model, create_missing_keyspace=True): if not issubclass(model, Model): raise CQLEngineException("Models must be derived from base Model.") - + if model.__abstract__: raise CQLEngineException("cannot create table from abstract model") @@ -308,8 +307,7 @@ def update_compaction(model): def delete_table(model): - warnings.warn("delete_table has been deprecated in favor of drop_table()", DeprecationWarning) - return drop_table(model) + raise CQLEngineException("delete_table has been deprecated in favor of drop_table()") def drop_table(model): From 8834ec6392884cc020101f30f36b911a9850eea7 Mon Sep 17 00:00:00 2001 From: Michael Cyrulnik Date: Sat, 28 Jun 2014 20:42:47 -0400 Subject: [PATCH 0800/3726] delayed connect. 
use setup(delayed_connect=True) --- cqlengine/connection.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index bd000bd0cc..89e18c1d07 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -27,12 +27,14 @@ class CQLConnectionError(CQLEngineException): pass cluster = None session = None +delayed_connect_args = None default_consistency_level = None def setup( hosts, default_keyspace=None, consistency=ConsistencyLevel.ONE, + delayed_connect=False, **kwargs): """ Records the hosts and connects to one of them @@ -43,8 +45,10 @@ def setup( :type default_keyspace: str :param consistency: The global consistency level :type consistency: int + :param delayed_connect: True if should not connect until first use + :type delayed_connect: bool """ - global cluster, session, default_consistency_level + global cluster, session, default_consistency_level, delayed_connect_args if 'username' in kwargs or 'password' in kwargs: raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider") @@ -54,12 +58,18 @@ def setup( models.DEFAULT_KEYSPACE = default_keyspace default_consistency_level = consistency + if delayed_connect: + delayed_connect_args = (hosts, default_keyspace, consistency, kwargs) + return + cluster = Cluster(hosts, **kwargs) session = cluster.connect() session.row_factory = dict_factory def execute(query, params=None, consistency_level=None): + handle_delayed_connect() + if consistency_level is None: consistency_level = default_consistency_level @@ -82,7 +92,16 @@ def execute(query, params=None, consistency_level=None): def get_session(): + handle_delayed_connect() return session def get_cluster(): + handle_delayed_connect() return cluster + +def handle_delayed_connect(): + global delayed_connect_args + if delayed_connect_args: + hosts, default_keyspace, consistency, kwargs = delayed_connect_args + delayed_connect_args = 
None + setup(hosts, default_keyspace, consistency, **kwargs) From 533ab80060030701a8ab0e2108fe1632272f502f Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Tue, 1 Jul 2014 18:48:58 +0800 Subject: [PATCH 0801/3726] 'IF NOT EXISTS' for insert statement The usage is just like 'ttl'. --- cqlengine/models.py | 24 +++++- cqlengine/query.py | 15 +++- cqlengine/statements.py | 21 +++++ cqlengine/tests/test_checkexist.py | 129 +++++++++++++++++++++++++++++ 4 files changed, 184 insertions(+), 5 deletions(-) create mode 100644 cqlengine/tests/test_checkexist.py diff --git a/cqlengine/models.py b/cqlengine/models.py index 028ab658e6..17d403df9c 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -111,6 +111,23 @@ def timestamp_setter(ts): def __call__(self, *args, **kwargs): raise NotImplementedError +class CheckExistDescriptor(object): + """ + return a query set descriptor with a check-exist flag specified + """ + def __get__(self, instance, model): + if instance: + # instance method + def checkexist_setter(ce): + instance._check_exist = ce + return instance + return checkexist_setter + + return model.objects.check_exist + + def __call__(self, *args, **kwargs): + raise NotImplementedError + class ConsistencyDescriptor(object): """ returns a query set descriptor if called on Class, instance if it was an instance call @@ -220,6 +237,8 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass # custom timestamps, see USING TIMESTAMP X timestamp = TimestampDescriptor() + + check_exist = CheckExistDescriptor() # _len is lazily created by __len__ @@ -261,6 +280,8 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass _timestamp = None # optional timestamp to include with the operation (USING TIMESTAMP) + _check_exist = False # optional check_exist flag to check existence before insertion + def __init__(self, **values): self._values = {} self._ttl = None @@ -510,7 +531,8 @@ def save(self): batch=self._batch, ttl=self._ttl, timestamp=self._timestamp, - 
consistency=self.__consistency__).save() + consistency=self.__consistency__, + check_exist=self._check_exist).save() #reset the value managers for v in self._values.values(): diff --git a/cqlengine/query.py b/cqlengine/query.py index 320084ef00..c95124a64c 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -222,6 +222,7 @@ def __init__(self, model): self._ttl = None self._consistency = None self._timestamp = None + self._check_exist = False @property def column_family_name(self): @@ -568,7 +569,7 @@ def defer(self, fields): def create(self, **kwargs): return self.model(**kwargs).batch(self._batch).ttl(self._ttl).\ - consistency(self._consistency).\ + consistency(self._consistency).check_exist(self._check_exist).\ timestamp(self._timestamp).save() def delete(self): @@ -707,6 +708,11 @@ def timestamp(self, timestamp): clone._timestamp = timestamp return clone + def check_exist(self, check_exist): + clone = copy.deepcopy(self) + clone._check_exist = check_exist + return clone + def update(self, **values): """ Updates the rows in this queryset """ if not values: @@ -766,8 +772,9 @@ class DMLQuery(object): _ttl = None _consistency = None _timestamp = None + _check_exist = False - def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None): + def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None, check_exist=False): self.model = model self.column_family_name = self.model.column_family_name() self.instance = instance @@ -775,6 +782,7 @@ def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, self._ttl = ttl self._consistency = consistency self._timestamp = timestamp + self._check_exist = check_exist def _execute(self, q): if self._batch: @@ -886,7 +894,7 @@ def save(self): if self.instance._has_counter or self.instance._can_update(): return self.update() else: - insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp) + insert 
= InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, check_exist=self._check_exist) for name, col in self.instance._columns.items(): val = getattr(self.instance, name, None) if col._val_is_null(val): @@ -902,7 +910,6 @@ def save(self): # caused by pointless update queries if not insert.is_empty: self._execute(insert) - # delete any nulled columns self._delete_null_columns() diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 1619da7f7d..849a7940d5 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -613,6 +613,24 @@ def get_context(self): class InsertStatement(AssignmentStatement): """ an cql insert select statement """ + def __init__(self, + table, + assignments=None, + consistency=None, + where=None, + ttl=None, + timestamp=None, + check_exist=False): + super(InsertStatement, self).__init__( + table, + assignments=assignments, + consistency=consistency, + where=where, + ttl=ttl, + timestamp=timestamp) + + self.check_exist = check_exist + def add_where_clause(self, clause): raise StatementException("Cannot add where clauses to insert statements") @@ -627,6 +645,9 @@ def __unicode__(self): qs += ['VALUES'] qs += ["({})".format(', '.join(['%({})s'.format(v) for v in values]))] + if self.check_exist: + qs += ["IF NOT EXISTS"] + if self.ttl: qs += ["USING TTL {}".format(self.ttl)] diff --git a/cqlengine/tests/test_checkexist.py b/cqlengine/tests/test_checkexist.py new file mode 100644 index 0000000000..7797459e62 --- /dev/null +++ b/cqlengine/tests/test_checkexist.py @@ -0,0 +1,129 @@ +from cqlengine.management import sync_table, drop_table, create_keyspace, delete_keyspace +from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.models import Model +from cqlengine import columns +from uuid import uuid4 +import mock +from cqlengine.connection import get_session + + +class TestChechExistModel(Model): + + __keyspace__ = 'cqlengine_test_checkexist' + + id = columns.UUID(primary_key=True, 
default=lambda:uuid4()) + count = columns.Integer() + text = columns.Text(required=False) + + +class BaseCheckExistTest(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(BaseCheckExistTest, cls).setUpClass() + """ + when receiving an insert statement with 'if not exist', cassandra would + perform a read with QUORUM level. Unittest would be failed if replica_factor + is 3 and one node only. Therefore I have create a new keyspace with + replica_factor:1. + """ + create_keyspace(TestChechExistModel.__keyspace__, replication_factor=1) + sync_table(TestChechExistModel) + + @classmethod + def tearDownClass(cls): + super(BaseCassEngTestCase, cls).tearDownClass() + drop_table(TestChechExistModel) + delete_keyspace(TestChechExistModel.__keyspace__) + + +class CheckExistInsertTests(BaseCheckExistTest): + + def test_insert_check_exist_success(self): + """ tests that insertion with check_exist work as expected """ + id = uuid4() + + TestChechExistModel.create(id=id, count=8, text='123456789') + TestChechExistModel.check_exist(True).create(id=id, count=9, text='111111111111') + + q = TestChechExistModel.objects(id=id) + self.assertEqual(len(q), 1) + + tm = q.first() + self.assertEquals(tm.count, 8) + self.assertEquals(tm.text, '123456789') + + def test_insert_check_exist_failure(self): + """ tests that insertion with check_exist failure """ + id = uuid4() + + TestChechExistModel.create(id=id, count=8, text='123456789') + TestChechExistModel.check_exist(False).create(id=id, count=9, text='111111111111') + + q = TestChechExistModel.objects(id=id) + self.assertEquals(len(q), 1) + + tm = q.first() + self.assertEquals(tm.count, 9) + self.assertEquals(tm.text, '111111111111') + + +class CheckExistModelTest(BaseCheckExistTest): + + def test_check_exist_included_on_create(self): + """ tests that check_exist on models works as expected """ + + session = get_session() + + with mock.patch.object(session, 'execute') as m: + 
TestChechExistModel.check_exist(True).create(count=8) + + query = m.call_args[0][0].query_string + self.assertIn("IF NOT EXISTS", query) + + def test_check_exist_included_on_save(self): + + session = get_session() + + with mock.patch.object(session, 'execute') as m: + tm = TestChechExistModel(count=8) + tm.check_exist(True).save() + + query = m.call_args[0][0].query_string + self.assertIn("IF NOT EXISTS", query) + + def test_queryset_is_returned_on_class(self): + """ ensure we get a queryset description back """ + qs = TestChechExistModel.check_exist(True) + self.assertTrue(isinstance(qs, TestChechExistModel.__queryset__), type(qs)) + + +class CheckExistInstanceTest(BaseCheckExistTest): + + def test_instance_is_returned(self): + """ + ensures that we properly handle the instance.check_exist(True).save() + scenario + """ + o = TestChechExistModel.create(text="whatever") + o.text = "new stuff" + o = o.check_exist(True) + self.assertEqual(True, o._check_exist) + + def test_check_exist_is_not_include_with_query_on_update(self): + """ + make sure we don't put 'IF NOT EXIST' in update statements + """ + session = get_session() + + o = TestChechExistModel.create(text="whatever") + o.text = "new stuff" + o = o.check_exist(True) + + with mock.patch.object(session, 'execute') as m: + o.save() + + query = m.call_args[0][0].query_string + self.assertNotIn("IF NOT EXIST", query) + + From f5d1f8432b7936bb0db6b2d1cddb3e376b968fef Mon Sep 17 00:00:00 2001 From: Michael Cyrulnik Date: Tue, 1 Jul 2014 16:09:31 -0400 Subject: [PATCH 0802/3726] delayed_connect is now lazy_connect --- cqlengine/connection.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 89e18c1d07..0f9d220c52 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -27,14 +27,14 @@ class CQLConnectionError(CQLEngineException): pass cluster = None session = None -delayed_connect_args = None 
+lazy_connect_args = None default_consistency_level = None def setup( hosts, default_keyspace=None, consistency=ConsistencyLevel.ONE, - delayed_connect=False, + lazy_connect=False, **kwargs): """ Records the hosts and connects to one of them @@ -45,10 +45,10 @@ def setup( :type default_keyspace: str :param consistency: The global consistency level :type consistency: int - :param delayed_connect: True if should not connect until first use - :type delayed_connect: bool + :param lazy_connect: True if should not connect until first use + :type lazy_connect: bool """ - global cluster, session, default_consistency_level, delayed_connect_args + global cluster, session, default_consistency_level, lazy_connect_args if 'username' in kwargs or 'password' in kwargs: raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider") @@ -58,8 +58,8 @@ def setup( models.DEFAULT_KEYSPACE = default_keyspace default_consistency_level = consistency - if delayed_connect: - delayed_connect_args = (hosts, default_keyspace, consistency, kwargs) + if lazy_connect: + lazy_connect_args = (hosts, default_keyspace, consistency, kwargs) return cluster = Cluster(hosts, **kwargs) @@ -68,7 +68,7 @@ def setup( def execute(query, params=None, consistency_level=None): - handle_delayed_connect() + handle_lazy_connect() if consistency_level is None: consistency_level = default_consistency_level @@ -92,16 +92,16 @@ def execute(query, params=None, consistency_level=None): def get_session(): - handle_delayed_connect() + handle_lazy_connect() return session def get_cluster(): - handle_delayed_connect() + handle_lazy_connect() return cluster -def handle_delayed_connect(): - global delayed_connect_args - if delayed_connect_args: - hosts, default_keyspace, consistency, kwargs = delayed_connect_args - delayed_connect_args = None +def handle_lazy_connect(): + global lazy_connect_args + if lazy_connect_args: + hosts, default_keyspace, consistency, kwargs = 
lazy_connect_args + lazy_connect_args = None setup(hosts, default_keyspace, consistency, **kwargs) From 88069cab8f6e2c0071b885f2a2a3ad4f42f08ac0 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Wed, 2 Jul 2014 12:53:22 +0800 Subject: [PATCH 0803/3726] rename all 'check_exist' related things to 'if_not_exists' --- cqlengine/models.py | 18 ++--- cqlengine/query.py | 16 ++--- cqlengine/statements.py | 6 +- ...test_checkexist.py => test_ifnotexists.py} | 72 +++++++++---------- 4 files changed, 56 insertions(+), 56 deletions(-) rename cqlengine/tests/{test_checkexist.py => test_ifnotexists.py} (50%) diff --git a/cqlengine/models.py b/cqlengine/models.py index 17d403df9c..ddb7980cfd 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -111,19 +111,19 @@ def timestamp_setter(ts): def __call__(self, *args, **kwargs): raise NotImplementedError -class CheckExistDescriptor(object): +class IfNotExistsDescriptor(object): """ - return a query set descriptor with a check-exist flag specified + return a query set descriptor with a if_not_exists flag specified """ def __get__(self, instance, model): if instance: # instance method - def checkexist_setter(ce): - instance._check_exist = ce + def ifnotexists_setter(ife): + instance._if_not_exists = ife return instance - return checkexist_setter + return ifnotexists_setter - return model.objects.check_exist + return model.objects.if_not_exists def __call__(self, *args, **kwargs): raise NotImplementedError @@ -238,7 +238,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass # custom timestamps, see USING TIMESTAMP X timestamp = TimestampDescriptor() - check_exist = CheckExistDescriptor() + if_not_exists = IfNotExistsDescriptor() # _len is lazily created by __len__ @@ -280,7 +280,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass _timestamp = None # optional timestamp to include with the operation (USING TIMESTAMP) - _check_exist = False # optional check_exist flag to check existence before 
insertion + _if_not_exists = False # optional if_not_exists flag to check existence before insertion def __init__(self, **values): self._values = {} @@ -532,7 +532,7 @@ def save(self): ttl=self._ttl, timestamp=self._timestamp, consistency=self.__consistency__, - check_exist=self._check_exist).save() + if_not_exists=self._if_not_exists).save() #reset the value managers for v in self._values.values(): diff --git a/cqlengine/query.py b/cqlengine/query.py index c95124a64c..430c137270 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -222,7 +222,7 @@ def __init__(self, model): self._ttl = None self._consistency = None self._timestamp = None - self._check_exist = False + self._if_not_exists = False @property def column_family_name(self): @@ -569,7 +569,7 @@ def defer(self, fields): def create(self, **kwargs): return self.model(**kwargs).batch(self._batch).ttl(self._ttl).\ - consistency(self._consistency).check_exist(self._check_exist).\ + consistency(self._consistency).if_not_exists(self._if_not_exists).\ timestamp(self._timestamp).save() def delete(self): @@ -708,9 +708,9 @@ def timestamp(self, timestamp): clone._timestamp = timestamp return clone - def check_exist(self, check_exist): + def if_not_exists(self, if_not_exists): clone = copy.deepcopy(self) - clone._check_exist = check_exist + clone._if_not_exists = if_not_exists return clone def update(self, **values): @@ -772,9 +772,9 @@ class DMLQuery(object): _ttl = None _consistency = None _timestamp = None - _check_exist = False + _if_not_exists = False - def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None, check_exist=False): + def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None, if_not_exists=False): self.model = model self.column_family_name = self.model.column_family_name() self.instance = instance @@ -782,7 +782,7 @@ def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, self._ttl = ttl 
self._consistency = consistency self._timestamp = timestamp - self._check_exist = check_exist + self._if_not_exists = if_not_exists def _execute(self, q): if self._batch: @@ -894,7 +894,7 @@ def save(self): if self.instance._has_counter or self.instance._can_update(): return self.update() else: - insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, check_exist=self._check_exist) + insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, if_not_exists=self._if_not_exists) for name, col in self.instance._columns.items(): val = getattr(self.instance, name, None) if col._val_is_null(val): diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 849a7940d5..62c8dc7a3e 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -620,7 +620,7 @@ def __init__(self, where=None, ttl=None, timestamp=None, - check_exist=False): + if_not_exists=False): super(InsertStatement, self).__init__( table, assignments=assignments, @@ -629,7 +629,7 @@ def __init__(self, ttl=ttl, timestamp=timestamp) - self.check_exist = check_exist + self.if_not_exists = if_not_exists def add_where_clause(self, clause): raise StatementException("Cannot add where clauses to insert statements") @@ -645,7 +645,7 @@ def __unicode__(self): qs += ['VALUES'] qs += ["({})".format(', '.join(['%({})s'.format(v) for v in values]))] - if self.check_exist: + if self.if_not_exists: qs += ["IF NOT EXISTS"] if self.ttl: diff --git a/cqlengine/tests/test_checkexist.py b/cqlengine/tests/test_ifnotexists.py similarity index 50% rename from cqlengine/tests/test_checkexist.py rename to cqlengine/tests/test_ifnotexists.py index 7797459e62..bca737be6e 100644 --- a/cqlengine/tests/test_checkexist.py +++ b/cqlengine/tests/test_ifnotexists.py @@ -7,60 +7,60 @@ from cqlengine.connection import get_session -class TestChechExistModel(Model): +class TestIfNotExistsModel(Model): - __keyspace__ = 'cqlengine_test_checkexist' + __keyspace__ = 
'cqlengine_test_ifnotexists' id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() text = columns.Text(required=False) -class BaseCheckExistTest(BaseCassEngTestCase): +class BaseIfNotExistsTest(BaseCassEngTestCase): @classmethod def setUpClass(cls): - super(BaseCheckExistTest, cls).setUpClass() + super(BaseIfNotExistsTest, cls).setUpClass() """ when receiving an insert statement with 'if not exist', cassandra would perform a read with QUORUM level. Unittest would be failed if replica_factor is 3 and one node only. Therefore I have create a new keyspace with replica_factor:1. """ - create_keyspace(TestChechExistModel.__keyspace__, replication_factor=1) - sync_table(TestChechExistModel) + create_keyspace(TestIfNotExistsModel.__keyspace__, replication_factor=1) + sync_table(TestIfNotExistsModel) @classmethod def tearDownClass(cls): super(BaseCassEngTestCase, cls).tearDownClass() - drop_table(TestChechExistModel) - delete_keyspace(TestChechExistModel.__keyspace__) + drop_table(TestIfNotExistsModel) + delete_keyspace(TestIfNotExistsModel.__keyspace__) -class CheckExistInsertTests(BaseCheckExistTest): +class IfNotExistsInsertTests(BaseIfNotExistsTest): - def test_insert_check_exist_success(self): - """ tests that insertion with check_exist work as expected """ + def test_insert_if_not_exists_success(self): + """ tests that insertion with if_not_exists work as expected """ id = uuid4() - TestChechExistModel.create(id=id, count=8, text='123456789') - TestChechExistModel.check_exist(True).create(id=id, count=9, text='111111111111') + TestIfNotExistsModel.create(id=id, count=8, text='123456789') + TestIfNotExistsModel.if_not_exists(True).create(id=id, count=9, text='111111111111') - q = TestChechExistModel.objects(id=id) + q = TestIfNotExistsModel.objects(id=id) self.assertEqual(len(q), 1) tm = q.first() self.assertEquals(tm.count, 8) self.assertEquals(tm.text, '123456789') - def test_insert_check_exist_failure(self): - """ tests that 
insertion with check_exist failure """ + def test_insert_if_not_exists_failure(self): + """ tests that insertion with if_not_exists failure """ id = uuid4() - TestChechExistModel.create(id=id, count=8, text='123456789') - TestChechExistModel.check_exist(False).create(id=id, count=9, text='111111111111') + TestIfNotExistsModel.create(id=id, count=8, text='123456789') + TestIfNotExistsModel.if_not_exists(False).create(id=id, count=9, text='111111111111') - q = TestChechExistModel.objects(id=id) + q = TestIfNotExistsModel.objects(id=id) self.assertEquals(len(q), 1) tm = q.first() @@ -68,57 +68,57 @@ def test_insert_check_exist_failure(self): self.assertEquals(tm.text, '111111111111') -class CheckExistModelTest(BaseCheckExistTest): +class IfNotExistsModelTest(BaseIfNotExistsTest): - def test_check_exist_included_on_create(self): - """ tests that check_exist on models works as expected """ + def test_if_not_exists_included_on_create(self): + """ tests that if_not_exists on models works as expected """ session = get_session() with mock.patch.object(session, 'execute') as m: - TestChechExistModel.check_exist(True).create(count=8) + TestIfNotExistsModel.if_not_exists(True).create(count=8) query = m.call_args[0][0].query_string self.assertIn("IF NOT EXISTS", query) - def test_check_exist_included_on_save(self): + def test_if_not_exists_included_on_save(self): session = get_session() with mock.patch.object(session, 'execute') as m: - tm = TestChechExistModel(count=8) - tm.check_exist(True).save() + tm = TestIfNotExistsModel(count=8) + tm.if_not_exists(True).save() query = m.call_args[0][0].query_string self.assertIn("IF NOT EXISTS", query) def test_queryset_is_returned_on_class(self): """ ensure we get a queryset description back """ - qs = TestChechExistModel.check_exist(True) - self.assertTrue(isinstance(qs, TestChechExistModel.__queryset__), type(qs)) + qs = TestIfNotExistsModel.if_not_exists(True) + self.assertTrue(isinstance(qs, TestIfNotExistsModel.__queryset__), 
type(qs)) -class CheckExistInstanceTest(BaseCheckExistTest): +class IfNotExistsInstanceTest(BaseIfNotExistsTest): def test_instance_is_returned(self): """ - ensures that we properly handle the instance.check_exist(True).save() + ensures that we properly handle the instance.if_not_exists(True).save() scenario """ - o = TestChechExistModel.create(text="whatever") + o = TestIfNotExistsModel.create(text="whatever") o.text = "new stuff" - o = o.check_exist(True) - self.assertEqual(True, o._check_exist) + o = o.if_not_exists(True) + self.assertEqual(True, o._if_not_exists) - def test_check_exist_is_not_include_with_query_on_update(self): + def test_if_not_exists_is_not_include_with_query_on_update(self): """ make sure we don't put 'IF NOT EXIST' in update statements """ session = get_session() - o = TestChechExistModel.create(text="whatever") + o = TestIfNotExistsModel.create(text="whatever") o.text = "new stuff" - o = o.check_exist(True) + o = o.if_not_exists(True) with mock.patch.object(session, 'execute') as m: o.save() From 1893dadc452e1aba1964d8f6101c362841d70cd3 Mon Sep 17 00:00:00 2001 From: Michael Cyrulnik Date: Sat, 28 Jun 2014 20:42:47 -0400 Subject: [PATCH 0804/3726] delayed connect. 
use setup(delayed_connect=True) --- cqlengine/connection.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index da86e60053..82c333bbdb 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -27,12 +27,14 @@ class CQLConnectionError(CQLEngineException): pass cluster = None session = None +delayed_connect_args = None default_consistency_level = None def setup( hosts, default_keyspace, consistency=ConsistencyLevel.ONE, + delayed_connect=False, **kwargs): """ Records the hosts and connects to one of them @@ -43,8 +45,10 @@ def setup( :type default_keyspace: str :param consistency: The global consistency level :type consistency: int + :param delayed_connect: True if should not connect until first use + :type delayed_connect: bool """ - global cluster, session, default_consistency_level + global cluster, session, default_consistency_level, delayed_connect_args if 'username' in kwargs or 'password' in kwargs: raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider") @@ -54,6 +58,10 @@ def setup( models.DEFAULT_KEYSPACE = default_keyspace default_consistency_level = consistency + if delayed_connect: + delayed_connect_args = (hosts, default_keyspace, consistency, kwargs) + return + cluster = Cluster(hosts, **kwargs) session = cluster.connect() session.row_factory = dict_factory @@ -63,6 +71,8 @@ def execute(query, params=None, consistency_level=None): if not session: raise CQLEngineException("It is required to setup() cqlengine before executing queries") + handle_delayed_connect() + if consistency_level is None: consistency_level = default_consistency_level @@ -86,7 +96,16 @@ def execute(query, params=None, consistency_level=None): def get_session(): + handle_delayed_connect() return session def get_cluster(): + handle_delayed_connect() return cluster + +def handle_delayed_connect(): + global delayed_connect_args + if 
delayed_connect_args: + hosts, default_keyspace, consistency, kwargs = delayed_connect_args + delayed_connect_args = None + setup(hosts, default_keyspace, consistency, **kwargs) From 4a8d19f01628048e0be7cb900a54b679e49f47df Mon Sep 17 00:00:00 2001 From: Michael Cyrulnik Date: Tue, 1 Jul 2014 16:09:31 -0400 Subject: [PATCH 0805/3726] delayed_connect is now lazy_connect --- cqlengine/connection.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 82c333bbdb..e22f7556dd 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -27,14 +27,14 @@ class CQLConnectionError(CQLEngineException): pass cluster = None session = None -delayed_connect_args = None +lazy_connect_args = None default_consistency_level = None def setup( hosts, default_keyspace, consistency=ConsistencyLevel.ONE, - delayed_connect=False, + lazy_connect=False, **kwargs): """ Records the hosts and connects to one of them @@ -45,10 +45,10 @@ def setup( :type default_keyspace: str :param consistency: The global consistency level :type consistency: int - :param delayed_connect: True if should not connect until first use - :type delayed_connect: bool + :param lazy_connect: True if should not connect until first use + :type lazy_connect: bool """ - global cluster, session, default_consistency_level, delayed_connect_args + global cluster, session, default_consistency_level, lazy_connect_args if 'username' in kwargs or 'password' in kwargs: raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider") @@ -58,8 +58,8 @@ def setup( models.DEFAULT_KEYSPACE = default_keyspace default_consistency_level = consistency - if delayed_connect: - delayed_connect_args = (hosts, default_keyspace, consistency, kwargs) + if lazy_connect: + lazy_connect_args = (hosts, default_keyspace, consistency, kwargs) return cluster = Cluster(hosts, **kwargs) @@ -71,7 +71,7 @@ def 
execute(query, params=None, consistency_level=None): if not session: raise CQLEngineException("It is required to setup() cqlengine before executing queries") - handle_delayed_connect() + handle_lazy_connect() if consistency_level is None: consistency_level = default_consistency_level @@ -96,16 +96,16 @@ def execute(query, params=None, consistency_level=None): def get_session(): - handle_delayed_connect() + handle_lazy_connect() return session def get_cluster(): - handle_delayed_connect() + handle_lazy_connect() return cluster -def handle_delayed_connect(): - global delayed_connect_args - if delayed_connect_args: - hosts, default_keyspace, consistency, kwargs = delayed_connect_args - delayed_connect_args = None +def handle_lazy_connect(): + global lazy_connect_args + if lazy_connect_args: + hosts, default_keyspace, consistency, kwargs = lazy_connect_args + lazy_connect_args = None setup(hosts, default_keyspace, consistency, **kwargs) From b5924f38a44c9ebb2e6936206188fe83d6e4e06a Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 2 Jul 2014 14:06:29 -0700 Subject: [PATCH 0806/3726] trying to get travis to use nose instead of py.test --- .travis.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 86b92c0ac8..172874a6f5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,7 +8,9 @@ before_install: - sudo service cassandra start install: - "pip install -r requirements.txt --use-mirrors" - - "pip install pytest --use-mirrors" + #- "pip install pytest --use-mirrors" + script: - while [ ! 
-f /var/run/cassandra.pid ] ; do sleep 1 ; done # wait until cassandra is ready - - "py.test cqlengine/tests/" + - "nosetests --no-skip" + # - "py.test cqlengine/tests/" From 8372825687f5bb62adf5f5efed9489b60e21c24a Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 2 Jul 2014 16:47:45 -0700 Subject: [PATCH 0807/3726] setting up tox for python 2 + 3 testing --- tox.ini | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 tox.ini diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000000..eee198488f --- /dev/null +++ b/tox.ini @@ -0,0 +1,6 @@ +[tox] +envlist=py27,py34 + +[testenv] +deps= -rrequirements.txt +commands=nosetests --no-skip From 0ced4896fa2313db399d2387aa2edd0b536f2ef4 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 2 Jul 2014 17:16:12 -0700 Subject: [PATCH 0808/3726] use current version of the dev libraries --- requirements.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/requirements.txt b/requirements.txt index 5973311b23..e648425ad6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ -ipython==0.13.1 -ipdb==0.7 -Sphinx==1.1.3 -mock==1.0.1 +ipython +ipdb +Sphinx +mock sure==1.2.5 cassandra-driver>=2.0.0 From 2bfdfcc495a0bc2ba3626d345388afd9abf98677 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 2 Jul 2014 17:31:48 -0700 Subject: [PATCH 0809/3726] changelog --- changelog | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/changelog b/changelog index e30733c62b..f6bd732333 100644 --- a/changelog +++ b/changelog @@ -2,11 +2,27 @@ CHANGELOG 0.16.0 -* Check number of elements in collections. -* Add support for table properties. - - -0.15.0 + 222: figure out how to make travis not totally fail when a test is skipped + 220: delayed connect. 
use setup(delayed_connect=True) + 218: throw exception on create_table and delete_table + 212: Unchanged primary key trigger error on update + 206: FAQ - why we dont' do #189 + 191: Add support for simple table properties. + 172: raise exception when None is passed in as query param + 170: trying to perform queries before connection is established should raise useful exception + 162: Not Possible to Make Non-Equality Filtering Queries + 161: Filtering on DateTime column + 154: Blob(bytes) column type issue + 128: remove default read_repair_chance & ensure changes are saved when using sync_table + 106: specify caching on model + 99: specify caching options table management + 94: type checking on sync_table (currently allows anything then fails miserably) + 73: remove default 'cqlengine' keyspace table management + 71: add named table and query expression usage to docs + + + +0.15.0 * native driver integration 0.14.0 @@ -14,7 +30,7 @@ CHANGELOG * fix for setting map to empty (Lifto) * report when creating models with attributes that conflict with cqlengine (maedhroz) * use stable version of sure package (maedhroz) -* performance improvements +* performance improvements 0.13.0 From 752960ef5a0dcc7caa29c069a897bfdbc64b1930 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Tue, 1 Jul 2014 18:48:58 +0800 Subject: [PATCH 0810/3726] 'IF NOT EXISTS' for insert statement The usage is just like 'ttl'. 
--- cqlengine/models.py | 24 +++++- cqlengine/query.py | 15 +++- cqlengine/statements.py | 21 +++++ cqlengine/tests/test_checkexist.py | 129 +++++++++++++++++++++++++++++ 4 files changed, 184 insertions(+), 5 deletions(-) create mode 100644 cqlengine/tests/test_checkexist.py diff --git a/cqlengine/models.py b/cqlengine/models.py index b7f80f60eb..b1bc8569ae 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -111,6 +111,23 @@ def timestamp_setter(ts): def __call__(self, *args, **kwargs): raise NotImplementedError +class CheckExistDescriptor(object): + """ + return a query set descriptor with a check-exist flag specified + """ + def __get__(self, instance, model): + if instance: + # instance method + def checkexist_setter(ce): + instance._check_exist = ce + return instance + return checkexist_setter + + return model.objects.check_exist + + def __call__(self, *args, **kwargs): + raise NotImplementedError + class ConsistencyDescriptor(object): """ returns a query set descriptor if called on Class, instance if it was an instance call @@ -220,6 +237,8 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass # custom timestamps, see USING TIMESTAMP X timestamp = TimestampDescriptor() + + check_exist = CheckExistDescriptor() # _len is lazily created by __len__ @@ -271,6 +290,8 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass _timestamp = None # optional timestamp to include with the operation (USING TIMESTAMP) + _check_exist = False # optional check_exist flag to check existence before insertion + def __init__(self, **values): self._values = {} self._ttl = None @@ -523,7 +544,8 @@ def save(self): batch=self._batch, ttl=self._ttl, timestamp=self._timestamp, - consistency=self.__consistency__).save() + consistency=self.__consistency__, + check_exist=self._check_exist).save() #reset the value managers for v in self._values.values(): diff --git a/cqlengine/query.py b/cqlengine/query.py index 0a64a5c78a..f6305d3c40 100644 --- 
a/cqlengine/query.py +++ b/cqlengine/query.py @@ -222,6 +222,7 @@ def __init__(self, model): self._ttl = None self._consistency = None self._timestamp = None + self._check_exist = False @property def column_family_name(self): @@ -571,7 +572,7 @@ def defer(self, fields): def create(self, **kwargs): return self.model(**kwargs).batch(self._batch).ttl(self._ttl).\ - consistency(self._consistency).\ + consistency(self._consistency).check_exist(self._check_exist).\ timestamp(self._timestamp).save() def delete(self): @@ -710,6 +711,11 @@ def timestamp(self, timestamp): clone._timestamp = timestamp return clone + def check_exist(self, check_exist): + clone = copy.deepcopy(self) + clone._check_exist = check_exist + return clone + def update(self, **values): """ Updates the rows in this queryset """ if not values: @@ -769,8 +775,9 @@ class DMLQuery(object): _ttl = None _consistency = None _timestamp = None + _check_exist = False - def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None): + def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None, check_exist=False): self.model = model self.column_family_name = self.model.column_family_name() self.instance = instance @@ -778,6 +785,7 @@ def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, self._ttl = ttl self._consistency = consistency self._timestamp = timestamp + self._check_exist = check_exist def _execute(self, q): if self._batch: @@ -889,7 +897,7 @@ def save(self): if self.instance._has_counter or self.instance._can_update(): return self.update() else: - insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp) + insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, check_exist=self._check_exist) for name, col in self.instance._columns.items(): val = getattr(self.instance, name, None) if col._val_is_null(val): @@ -905,7 +913,6 @@ def save(self): 
# caused by pointless update queries if not insert.is_empty: self._execute(insert) - # delete any nulled columns self._delete_null_columns() diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 549b529df7..9373f70faf 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -617,6 +617,24 @@ def get_context(self): class InsertStatement(AssignmentStatement): """ an cql insert select statement """ + def __init__(self, + table, + assignments=None, + consistency=None, + where=None, + ttl=None, + timestamp=None, + check_exist=False): + super(InsertStatement, self).__init__( + table, + assignments=assignments, + consistency=consistency, + where=where, + ttl=ttl, + timestamp=timestamp) + + self.check_exist = check_exist + def add_where_clause(self, clause): raise StatementException("Cannot add where clauses to insert statements") @@ -631,6 +649,9 @@ def __unicode__(self): qs += ['VALUES'] qs += ["({})".format(', '.join(['%({})s'.format(v) for v in values]))] + if self.check_exist: + qs += ["IF NOT EXISTS"] + if self.ttl: qs += ["USING TTL {}".format(self.ttl)] diff --git a/cqlengine/tests/test_checkexist.py b/cqlengine/tests/test_checkexist.py new file mode 100644 index 0000000000..7797459e62 --- /dev/null +++ b/cqlengine/tests/test_checkexist.py @@ -0,0 +1,129 @@ +from cqlengine.management import sync_table, drop_table, create_keyspace, delete_keyspace +from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.models import Model +from cqlengine import columns +from uuid import uuid4 +import mock +from cqlengine.connection import get_session + + +class TestChechExistModel(Model): + + __keyspace__ = 'cqlengine_test_checkexist' + + id = columns.UUID(primary_key=True, default=lambda:uuid4()) + count = columns.Integer() + text = columns.Text(required=False) + + +class BaseCheckExistTest(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(BaseCheckExistTest, cls).setUpClass() + """ + when receiving an insert statement 
with 'if not exist', cassandra would + perform a read with QUORUM level. Unittest would be failed if replica_factor + is 3 and one node only. Therefore I have create a new keyspace with + replica_factor:1. + """ + create_keyspace(TestChechExistModel.__keyspace__, replication_factor=1) + sync_table(TestChechExistModel) + + @classmethod + def tearDownClass(cls): + super(BaseCassEngTestCase, cls).tearDownClass() + drop_table(TestChechExistModel) + delete_keyspace(TestChechExistModel.__keyspace__) + + +class CheckExistInsertTests(BaseCheckExistTest): + + def test_insert_check_exist_success(self): + """ tests that insertion with check_exist work as expected """ + id = uuid4() + + TestChechExistModel.create(id=id, count=8, text='123456789') + TestChechExistModel.check_exist(True).create(id=id, count=9, text='111111111111') + + q = TestChechExistModel.objects(id=id) + self.assertEqual(len(q), 1) + + tm = q.first() + self.assertEquals(tm.count, 8) + self.assertEquals(tm.text, '123456789') + + def test_insert_check_exist_failure(self): + """ tests that insertion with check_exist failure """ + id = uuid4() + + TestChechExistModel.create(id=id, count=8, text='123456789') + TestChechExistModel.check_exist(False).create(id=id, count=9, text='111111111111') + + q = TestChechExistModel.objects(id=id) + self.assertEquals(len(q), 1) + + tm = q.first() + self.assertEquals(tm.count, 9) + self.assertEquals(tm.text, '111111111111') + + +class CheckExistModelTest(BaseCheckExistTest): + + def test_check_exist_included_on_create(self): + """ tests that check_exist on models works as expected """ + + session = get_session() + + with mock.patch.object(session, 'execute') as m: + TestChechExistModel.check_exist(True).create(count=8) + + query = m.call_args[0][0].query_string + self.assertIn("IF NOT EXISTS", query) + + def test_check_exist_included_on_save(self): + + session = get_session() + + with mock.patch.object(session, 'execute') as m: + tm = TestChechExistModel(count=8) + 
tm.check_exist(True).save() + + query = m.call_args[0][0].query_string + self.assertIn("IF NOT EXISTS", query) + + def test_queryset_is_returned_on_class(self): + """ ensure we get a queryset description back """ + qs = TestChechExistModel.check_exist(True) + self.assertTrue(isinstance(qs, TestChechExistModel.__queryset__), type(qs)) + + +class CheckExistInstanceTest(BaseCheckExistTest): + + def test_instance_is_returned(self): + """ + ensures that we properly handle the instance.check_exist(True).save() + scenario + """ + o = TestChechExistModel.create(text="whatever") + o.text = "new stuff" + o = o.check_exist(True) + self.assertEqual(True, o._check_exist) + + def test_check_exist_is_not_include_with_query_on_update(self): + """ + make sure we don't put 'IF NOT EXIST' in update statements + """ + session = get_session() + + o = TestChechExistModel.create(text="whatever") + o.text = "new stuff" + o = o.check_exist(True) + + with mock.patch.object(session, 'execute') as m: + o.save() + + query = m.call_args[0][0].query_string + self.assertNotIn("IF NOT EXIST", query) + + From 708f09a4cd9900c80e3fd5dd1a783fbda1ddc6c5 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Wed, 2 Jul 2014 12:53:22 +0800 Subject: [PATCH 0811/3726] rename all 'check_exist' related things to 'if_not_exists' --- cqlengine/models.py | 18 ++--- cqlengine/query.py | 16 ++--- cqlengine/statements.py | 6 +- ...test_checkexist.py => test_ifnotexists.py} | 72 +++++++++---------- 4 files changed, 56 insertions(+), 56 deletions(-) rename cqlengine/tests/{test_checkexist.py => test_ifnotexists.py} (50%) diff --git a/cqlengine/models.py b/cqlengine/models.py index b1bc8569ae..19798d4bee 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -111,19 +111,19 @@ def timestamp_setter(ts): def __call__(self, *args, **kwargs): raise NotImplementedError -class CheckExistDescriptor(object): +class IfNotExistsDescriptor(object): """ - return a query set descriptor with a check-exist flag specified + 
return a query set descriptor with a if_not_exists flag specified """ def __get__(self, instance, model): if instance: # instance method - def checkexist_setter(ce): - instance._check_exist = ce + def ifnotexists_setter(ife): + instance._if_not_exists = ife return instance - return checkexist_setter + return ifnotexists_setter - return model.objects.check_exist + return model.objects.if_not_exists def __call__(self, *args, **kwargs): raise NotImplementedError @@ -238,7 +238,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass # custom timestamps, see USING TIMESTAMP X timestamp = TimestampDescriptor() - check_exist = CheckExistDescriptor() + if_not_exists = IfNotExistsDescriptor() # _len is lazily created by __len__ @@ -290,7 +290,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass _timestamp = None # optional timestamp to include with the operation (USING TIMESTAMP) - _check_exist = False # optional check_exist flag to check existence before insertion + _if_not_exists = False # optional if_not_exists flag to check existence before insertion def __init__(self, **values): self._values = {} @@ -545,7 +545,7 @@ def save(self): ttl=self._ttl, timestamp=self._timestamp, consistency=self.__consistency__, - check_exist=self._check_exist).save() + if_not_exists=self._if_not_exists).save() #reset the value managers for v in self._values.values(): diff --git a/cqlengine/query.py b/cqlengine/query.py index f6305d3c40..b4dcecb2b8 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -222,7 +222,7 @@ def __init__(self, model): self._ttl = None self._consistency = None self._timestamp = None - self._check_exist = False + self._if_not_exists = False @property def column_family_name(self): @@ -572,7 +572,7 @@ def defer(self, fields): def create(self, **kwargs): return self.model(**kwargs).batch(self._batch).ttl(self._ttl).\ - consistency(self._consistency).check_exist(self._check_exist).\ + 
consistency(self._consistency).if_not_exists(self._if_not_exists).\ timestamp(self._timestamp).save() def delete(self): @@ -711,9 +711,9 @@ def timestamp(self, timestamp): clone._timestamp = timestamp return clone - def check_exist(self, check_exist): + def if_not_exists(self, if_not_exists): clone = copy.deepcopy(self) - clone._check_exist = check_exist + clone._if_not_exists = if_not_exists return clone def update(self, **values): @@ -775,9 +775,9 @@ class DMLQuery(object): _ttl = None _consistency = None _timestamp = None - _check_exist = False + _if_not_exists = False - def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None, check_exist=False): + def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None, if_not_exists=False): self.model = model self.column_family_name = self.model.column_family_name() self.instance = instance @@ -785,7 +785,7 @@ def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, self._ttl = ttl self._consistency = consistency self._timestamp = timestamp - self._check_exist = check_exist + self._if_not_exists = if_not_exists def _execute(self, q): if self._batch: @@ -897,7 +897,7 @@ def save(self): if self.instance._has_counter or self.instance._can_update(): return self.update() else: - insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, check_exist=self._check_exist) + insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, if_not_exists=self._if_not_exists) for name, col in self.instance._columns.items(): val = getattr(self.instance, name, None) if col._val_is_null(val): diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 9373f70faf..faf38e521e 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -624,7 +624,7 @@ def __init__(self, where=None, ttl=None, timestamp=None, - check_exist=False): + if_not_exists=False): 
super(InsertStatement, self).__init__( table, assignments=assignments, @@ -633,7 +633,7 @@ def __init__(self, ttl=ttl, timestamp=timestamp) - self.check_exist = check_exist + self.if_not_exists = if_not_exists def add_where_clause(self, clause): raise StatementException("Cannot add where clauses to insert statements") @@ -649,7 +649,7 @@ def __unicode__(self): qs += ['VALUES'] qs += ["({})".format(', '.join(['%({})s'.format(v) for v in values]))] - if self.check_exist: + if self.if_not_exists: qs += ["IF NOT EXISTS"] if self.ttl: diff --git a/cqlengine/tests/test_checkexist.py b/cqlengine/tests/test_ifnotexists.py similarity index 50% rename from cqlengine/tests/test_checkexist.py rename to cqlengine/tests/test_ifnotexists.py index 7797459e62..bca737be6e 100644 --- a/cqlengine/tests/test_checkexist.py +++ b/cqlengine/tests/test_ifnotexists.py @@ -7,60 +7,60 @@ from cqlengine.connection import get_session -class TestChechExistModel(Model): +class TestIfNotExistsModel(Model): - __keyspace__ = 'cqlengine_test_checkexist' + __keyspace__ = 'cqlengine_test_ifnotexists' id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() text = columns.Text(required=False) -class BaseCheckExistTest(BaseCassEngTestCase): +class BaseIfNotExistsTest(BaseCassEngTestCase): @classmethod def setUpClass(cls): - super(BaseCheckExistTest, cls).setUpClass() + super(BaseIfNotExistsTest, cls).setUpClass() """ when receiving an insert statement with 'if not exist', cassandra would perform a read with QUORUM level. Unittest would be failed if replica_factor is 3 and one node only. Therefore I have create a new keyspace with replica_factor:1. 
""" - create_keyspace(TestChechExistModel.__keyspace__, replication_factor=1) - sync_table(TestChechExistModel) + create_keyspace(TestIfNotExistsModel.__keyspace__, replication_factor=1) + sync_table(TestIfNotExistsModel) @classmethod def tearDownClass(cls): super(BaseCassEngTestCase, cls).tearDownClass() - drop_table(TestChechExistModel) - delete_keyspace(TestChechExistModel.__keyspace__) + drop_table(TestIfNotExistsModel) + delete_keyspace(TestIfNotExistsModel.__keyspace__) -class CheckExistInsertTests(BaseCheckExistTest): +class IfNotExistsInsertTests(BaseIfNotExistsTest): - def test_insert_check_exist_success(self): - """ tests that insertion with check_exist work as expected """ + def test_insert_if_not_exists_success(self): + """ tests that insertion with if_not_exists work as expected """ id = uuid4() - TestChechExistModel.create(id=id, count=8, text='123456789') - TestChechExistModel.check_exist(True).create(id=id, count=9, text='111111111111') + TestIfNotExistsModel.create(id=id, count=8, text='123456789') + TestIfNotExistsModel.if_not_exists(True).create(id=id, count=9, text='111111111111') - q = TestChechExistModel.objects(id=id) + q = TestIfNotExistsModel.objects(id=id) self.assertEqual(len(q), 1) tm = q.first() self.assertEquals(tm.count, 8) self.assertEquals(tm.text, '123456789') - def test_insert_check_exist_failure(self): - """ tests that insertion with check_exist failure """ + def test_insert_if_not_exists_failure(self): + """ tests that insertion with if_not_exists failure """ id = uuid4() - TestChechExistModel.create(id=id, count=8, text='123456789') - TestChechExistModel.check_exist(False).create(id=id, count=9, text='111111111111') + TestIfNotExistsModel.create(id=id, count=8, text='123456789') + TestIfNotExistsModel.if_not_exists(False).create(id=id, count=9, text='111111111111') - q = TestChechExistModel.objects(id=id) + q = TestIfNotExistsModel.objects(id=id) self.assertEquals(len(q), 1) tm = q.first() @@ -68,57 +68,57 @@ def 
test_insert_check_exist_failure(self): self.assertEquals(tm.text, '111111111111') -class CheckExistModelTest(BaseCheckExistTest): +class IfNotExistsModelTest(BaseIfNotExistsTest): - def test_check_exist_included_on_create(self): - """ tests that check_exist on models works as expected """ + def test_if_not_exists_included_on_create(self): + """ tests that if_not_exists on models works as expected """ session = get_session() with mock.patch.object(session, 'execute') as m: - TestChechExistModel.check_exist(True).create(count=8) + TestIfNotExistsModel.if_not_exists(True).create(count=8) query = m.call_args[0][0].query_string self.assertIn("IF NOT EXISTS", query) - def test_check_exist_included_on_save(self): + def test_if_not_exists_included_on_save(self): session = get_session() with mock.patch.object(session, 'execute') as m: - tm = TestChechExistModel(count=8) - tm.check_exist(True).save() + tm = TestIfNotExistsModel(count=8) + tm.if_not_exists(True).save() query = m.call_args[0][0].query_string self.assertIn("IF NOT EXISTS", query) def test_queryset_is_returned_on_class(self): """ ensure we get a queryset description back """ - qs = TestChechExistModel.check_exist(True) - self.assertTrue(isinstance(qs, TestChechExistModel.__queryset__), type(qs)) + qs = TestIfNotExistsModel.if_not_exists(True) + self.assertTrue(isinstance(qs, TestIfNotExistsModel.__queryset__), type(qs)) -class CheckExistInstanceTest(BaseCheckExistTest): +class IfNotExistsInstanceTest(BaseIfNotExistsTest): def test_instance_is_returned(self): """ - ensures that we properly handle the instance.check_exist(True).save() + ensures that we properly handle the instance.if_not_exists(True).save() scenario """ - o = TestChechExistModel.create(text="whatever") + o = TestIfNotExistsModel.create(text="whatever") o.text = "new stuff" - o = o.check_exist(True) - self.assertEqual(True, o._check_exist) + o = o.if_not_exists(True) + self.assertEqual(True, o._if_not_exists) - def 
test_check_exist_is_not_include_with_query_on_update(self): + def test_if_not_exists_is_not_include_with_query_on_update(self): """ make sure we don't put 'IF NOT EXIST' in update statements """ session = get_session() - o = TestChechExistModel.create(text="whatever") + o = TestIfNotExistsModel.create(text="whatever") o.text = "new stuff" - o = o.check_exist(True) + o = o.if_not_exists(True) with mock.patch.object(session, 'execute') as m: o.save() From dd878240dc84c79885fd1de6afaf7c0b3ae41d68 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Thu, 3 Jul 2014 14:27:48 +0800 Subject: [PATCH 0812/3726] rename keyspace for testing --- cqlengine/tests/test_ifnotexists.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/tests/test_ifnotexists.py b/cqlengine/tests/test_ifnotexists.py index bca737be6e..ca23c49c60 100644 --- a/cqlengine/tests/test_ifnotexists.py +++ b/cqlengine/tests/test_ifnotexists.py @@ -9,7 +9,7 @@ class TestIfNotExistsModel(Model): - __keyspace__ = 'cqlengine_test_ifnotexists' + __keyspace__ = 'cqlengine_test_lwt' id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() From 7a381327219f582aae6802e407a1094153c805cb Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Thu, 3 Jul 2014 14:41:37 +0800 Subject: [PATCH 0813/3726] update docs --- docs/topics/models.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 14f905b40d..786c0642a1 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -147,6 +147,12 @@ Model Methods Sets the ttl values to run instance updates and inserts queries with. + .. method:: if_not_exists(enable_or_not) + + Check the existence of an object before insertion. The existence of an + object is determined by its primary key(s). And please note using this flag + would incur performance cost. + .. method:: update(**values) Performs an update on the model instance. 
You can pass in values to set on the model From 82195877b4e1871b10677944a4831c1c9aedd9ae Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 3 Jul 2014 11:40:32 -0700 Subject: [PATCH 0814/3726] rewording FAQ item --- docs/topics/faq.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/topics/faq.rst b/docs/topics/faq.rst index 0b58b6358e..b250180bb2 100644 --- a/docs/topics/faq.rst +++ b/docs/topics/faq.rst @@ -2,7 +2,7 @@ Frequently Asked Questions ========================== -Q: Why does calling my Model(field=blah, field2=blah2) not work? +Q: Why don't updates work correctly on models instantiated as Model(field=blah, field2=blah2)? ------------------------------------------------------------------- -A: The __init__() of a model is used by cqlengine internally. If you want to create a new row in the database, use create(). +A: The recommended way to create new rows is with the models .create method. The values passed into a model's init method are interpreted by the model as the values as they were read from a row. This allows the model to "know" which rows have changed since the row was read out of cassandra, and create suitable update statements. \ No newline at end of file From cae022209fada866c2943b71f13a5a78a65fb705 Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 3 Jul 2014 13:25:01 -0700 Subject: [PATCH 0815/3726] adding warning when keyspaces are undefined. 
Since a default keyspace can be set on the setup function, we can't throw an exception, because it may not have been called by the time the model class is created --- cqlengine/models.py | 16 ++++++++++++++ .../tests/model/test_class_construction.py | 21 ++++++++++++++++++- 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index b7f80f60eb..b800394994 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -1,5 +1,7 @@ from collections import OrderedDict import re +import warnings + from cqlengine import columns from cqlengine.exceptions import ModelException, CQLEngineException, ValidationError from cqlengine.query import ModelQuerySet, DMLQuery, AbstractQueryableColumn @@ -13,6 +15,9 @@ class PolyMorphicModelException(ModelException): pass DEFAULT_KEYSPACE = None +class UndefinedKeyspaceWarning(Warning): + pass + class hybrid_classmethod(object): """ @@ -756,6 +761,17 @@ def _get_polymorphic_base(bases): #create the class and add a QuerySet to it klass = super(ModelMetaClass, cls).__new__(cls, name, bases, attrs) + + if not klass.__abstract__ and klass.__keyspace__ is None: + warnings.warn( + "No keyspace defined on {}.{}\n" + " The default keyspace 'cqlengine' was removed in 0.16.\n" + " Please define a keyspace on the '__keyspace__' model class attribute\n" + " or set a default keyspace with management.setup function\n" + .format(klass.__module__, klass.__name__), + UndefinedKeyspaceWarning + ) + return klass diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index c8a54eb1c2..58b6112995 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -1,9 +1,10 @@ from uuid import uuid4 +import warnings from cqlengine.query import QueryException, ModelQuerySet, DMLQuery from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.exceptions import ModelException, 
CQLEngineException -from cqlengine.models import Model, ModelDefinitionException, ColumnQueryEvaluator +from cqlengine.models import Model, ModelDefinitionException, ColumnQueryEvaluator, UndefinedKeyspaceWarning from cqlengine import columns import cqlengine @@ -208,6 +209,23 @@ class Model2(Model1): except Exception: assert False, "Model2 exception should not be caught by Model1" + def test_no_keyspace_warning(self): + with warnings.catch_warnings(record=True) as warn: + class NoKeyspace(Model): + key = columns.UUID(primary_key=True) + + self.assertEqual(len(warn), 1) + warn_message = warn[0] + self.assertEqual(warn_message.category, UndefinedKeyspaceWarning) + + def test_abstract_model_keyspace_warning_is_skipped(self): + with warnings.catch_warnings(record=True) as warn: + class NoKeyspace(Model): + __abstract__ = True + key = columns.UUID(primary_key=True) + + self.assertEqual(len(warn), 0) + class TestManualTableNaming(BaseCassEngTestCase): class RenamedTest(cqlengine.Model): @@ -223,6 +241,7 @@ def test_proper_table_naming(self): class AbstractModel(Model): __abstract__ = True + __keyspace__ = 'test' class ConcreteModel(AbstractModel): pkey = columns.Integer(primary_key=True) From d3ce856f91343dc2cbea6cf9e879c2dd43038ddb Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 3 Jul 2014 15:10:01 -0700 Subject: [PATCH 0816/3726] adding explicit keyspaces to ALL of the test models --- .../tests/columns/test_container_columns.py | 4 ++++ cqlengine/tests/columns/test_counter_column.py | 1 + cqlengine/tests/columns/test_validation.py | 11 +++++++++++ cqlengine/tests/columns/test_value_io.py | 1 + .../tests/management/test_compaction_settings.py | 10 ++++++++++ cqlengine/tests/management/test_management.py | 8 ++++++++ cqlengine/tests/model/test_class_construction.py | 16 ++++++++++++++++ .../tests/model/test_equality_operations.py | 1 + cqlengine/tests/model/test_model.py | 5 +++++ cqlengine/tests/model/test_model_io.py | 7 +++++++ 
cqlengine/tests/model/test_polymorphism.py | 8 ++++++++ cqlengine/tests/model/test_updates.py | 1 + cqlengine/tests/model/test_value_lists.py | 2 ++ cqlengine/tests/query/test_batch_query.py | 2 ++ cqlengine/tests/query/test_datetime_queries.py | 1 + cqlengine/tests/query/test_queryoperators.py | 2 ++ cqlengine/tests/query/test_queryset.py | 5 +++++ cqlengine/tests/query/test_updates.py | 1 + cqlengine/tests/test_batch_query.py | 1 + cqlengine/tests/test_consistency.py | 1 + cqlengine/tests/test_load.py | 1 + cqlengine/tests/test_timestamp.py | 1 + cqlengine/tests/test_ttl.py | 1 + 23 files changed, 91 insertions(+) diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 86a96e7edb..0b756f3316 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -9,6 +9,7 @@ class TestSetModel(Model): + __keyspace__ = 'test' partition = columns.UUID(primary_key=True, default=uuid4) int_set = columns.Set(columns.Integer, required=False) @@ -161,6 +162,7 @@ def test_default_empty_container_saving(self): class TestListModel(Model): + __keyspace__ = 'test' partition = columns.UUID(primary_key=True, default=uuid4) int_list = columns.List(columns.Integer, required=False) @@ -306,6 +308,7 @@ def test_blind_list_updates_from_none(self): assert m3.int_list == [] class TestMapModel(Model): + __keyspace__ = 'test' partition = columns.UUID(primary_key=True, default=uuid4) int_map = columns.Map(columns.Integer, columns.UUID, required=False) @@ -505,6 +508,7 @@ def test_default_empty_container_saving(self): class TestCamelMapModel(Model): + __keyspace__ = 'test' partition = columns.UUID(primary_key=True, default=uuid4) camelMap = columns.Map(columns.Text, columns.Integer, required=False) diff --git a/cqlengine/tests/columns/test_counter_column.py b/cqlengine/tests/columns/test_counter_column.py index 725b83905d..8e8b2361e4 100644 --- 
a/cqlengine/tests/columns/test_counter_column.py +++ b/cqlengine/tests/columns/test_counter_column.py @@ -8,6 +8,7 @@ class TestCounterModel(Model): + __keyspace__ = 'test' partition = columns.UUID(primary_key=True, default=uuid4) cluster = columns.UUID(primary_key=True, default=uuid4) counter = columns.Counter() diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 749066f32d..0df006e8e9 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -32,6 +32,7 @@ class TestDatetime(BaseCassEngTestCase): class DatetimeTest(Model): + __keyspace__ = 'test' test_id = Integer(primary_key=True) created_at = DateTime() @@ -80,6 +81,7 @@ def test_datetime_none(self): class TestBoolDefault(BaseCassEngTestCase): class BoolDefaultValueTest(Model): + __keyspace__ = 'test' test_id = Integer(primary_key=True) stuff = Boolean(default=True) @@ -98,6 +100,7 @@ def test_default_is_set(self): class TestVarInt(BaseCassEngTestCase): class VarIntTest(Model): + __keyspace__ = 'test' test_id = Integer(primary_key=True) bignum = VarInt(primary_key=True) @@ -120,6 +123,7 @@ def test_varint_io(self): class TestDate(BaseCassEngTestCase): class DateTest(Model): + __keyspace__ = 'test' test_id = Integer(primary_key=True) created_at = Date() @@ -158,6 +162,7 @@ def test_date_none(self): class TestDecimal(BaseCassEngTestCase): class DecimalTest(Model): + __keyspace__ = 'test' test_id = Integer(primary_key=True) dec_val = Decimal() @@ -182,6 +187,7 @@ def test_decimal_io(self): class TestUUID(BaseCassEngTestCase): class UUIDTest(Model): + __keyspace__ = 'test' test_id = Integer(primary_key=True) a_uuid = UUID(default=uuid4()) @@ -209,6 +215,7 @@ def test_uuid_str_no_dashes(self): class TestTimeUUID(BaseCassEngTestCase): class TimeUUIDTest(Model): + __keyspace__ = 'test' test_id = Integer(primary_key=True) timeuuid = TimeUUID(default=uuid1()) @@ -234,6 +241,7 @@ def test_timeuuid_io(self): class 
TestInteger(BaseCassEngTestCase): class IntegerTest(Model): + __keyspace__ = 'test' test_id = UUID(primary_key=True, default=lambda:uuid4()) value = Integer(default=0, required=True) @@ -244,6 +252,7 @@ def test_default_zero_fields_validate(self): class TestBigInt(BaseCassEngTestCase): class BigIntTest(Model): + __keyspace__ = 'test' test_id = UUID(primary_key=True, default=lambda:uuid4()) value = BigInt(default=0, required=True) @@ -301,6 +310,7 @@ def test_non_required_validation(self): class TestExtraFieldsRaiseException(BaseCassEngTestCase): class TestModel(Model): + __keyspace__ = 'test' id = UUID(primary_key=True, default=uuid4) def test_extra_field(self): @@ -309,6 +319,7 @@ def test_extra_field(self): class TestPythonDoesntDieWhenExtraFieldIsInCassandra(BaseCassEngTestCase): class TestModel(Model): + __keyspace__ = 'test' __table_name__ = 'alter_doesnt_break_running_app' id = UUID(primary_key=True, default=uuid4) diff --git a/cqlengine/tests/columns/test_value_io.py b/cqlengine/tests/columns/test_value_io.py index 4b4ac7d200..1515c25a26 100644 --- a/cqlengine/tests/columns/test_value_io.py +++ b/cqlengine/tests/columns/test_value_io.py @@ -40,6 +40,7 @@ def setUpClass(cls): # create a table with the given column class IOTestModel(Model): + __keyspace__ = 'test' table_name = cls.column.db_type + "_io_test_model_{}".format(uuid4().hex[:8]) pkey = cls.column(primary_key=True) data = cls.column() diff --git a/cqlengine/tests/management/test_compaction_settings.py b/cqlengine/tests/management/test_compaction_settings.py index b34a5cafe0..2cad5d554a 100644 --- a/cqlengine/tests/management/test_compaction_settings.py +++ b/cqlengine/tests/management/test_compaction_settings.py @@ -9,6 +9,7 @@ class CompactionModel(Model): + __keyspace__ = 'test' __compaction__ = None cid = columns.UUID(primary_key=True) name = columns.Text() @@ -73,6 +74,7 @@ def test_sstable_size_in_mb(self): class LeveledcompactionTestTable(Model): + __keyspace__ = 'test' __compaction__ = 
LeveledCompactionStrategy __compaction_sstable_size_in_mb__ = 64 @@ -94,6 +96,7 @@ def test_compaction_not_altered_without_changes_leveled(self): from cqlengine.management import update_compaction class LeveledCompactionChangesDetectionTest(Model): + __keyspace__ = 'test' __compaction__ = LeveledCompactionStrategy __compaction_sstable_size_in_mb__ = 160 __compaction_tombstone_threshold__ = 0.125 @@ -110,6 +113,7 @@ def test_compaction_not_altered_without_changes_sizetiered(self): from cqlengine.management import update_compaction class SizeTieredCompactionChangesDetectionTest(Model): + __keyspace__ = 'test' __compaction__ = SizeTieredCompactionStrategy __compaction_bucket_high__ = 20 __compaction_bucket_low__ = 10 @@ -142,6 +146,7 @@ def test_alter_actually_alters(self): def test_alter_options(self): class AlterTable(Model): + __keyspace__ = 'test' __compaction__ = LeveledCompactionStrategy __compaction_sstable_size_in_mb__ = 64 @@ -158,6 +163,7 @@ class AlterTable(Model): class EmptyCompactionTest(BaseCassEngTestCase): def test_empty_compaction(self): class EmptyCompactionModel(Model): + __keyspace__ = 'test' __compaction__ = None cid = columns.UUID(primary_key=True) name = columns.Text() @@ -167,12 +173,14 @@ class EmptyCompactionModel(Model): class CompactionLeveledStrategyModel(Model): + __keyspace__ = 'test' __compaction__ = LeveledCompactionStrategy cid = columns.UUID(primary_key=True) name = columns.Text() class CompactionSizeTieredModel(Model): + __keyspace__ = 'test' __compaction__ = SizeTieredCompactionStrategy cid = columns.UUID(primary_key=True) name = columns.Text() @@ -183,6 +191,7 @@ class OptionsTest(BaseCassEngTestCase): def test_all_size_tiered_options(self): class AllSizeTieredOptionsModel(Model): + __keyspace__ = 'test' __compaction__ = SizeTieredCompactionStrategy __compaction_bucket_low__ = .3 __compaction_bucket_high__ = 2 @@ -211,6 +220,7 @@ class AllSizeTieredOptionsModel(Model): def test_all_leveled_options(self): class 
AllLeveledOptionsModel(Model): + __keyspace__ = 'test' __compaction__ = LeveledCompactionStrategy __compaction_sstable_size_in_mb__ = 64 diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 44d0c33d06..1ed3137f92 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -27,16 +27,19 @@ def test_multiple_deletes_dont_fail(self): drop_table(TestModel) class LowercaseKeyModel(Model): + __keyspace__ = 'test' first_key = columns.Integer(primary_key=True) second_key = columns.Integer(primary_key=True) some_data = columns.Text() class CapitalizedKeyModel(Model): + __keyspace__ = 'test' firstKey = columns.Integer(primary_key=True) secondKey = columns.Integer(primary_key=True) someData = columns.Text() class PrimaryKeysOnlyModel(Model): + __keyspace__ = 'test' __compaction__ = LeveledCompactionStrategy first_ey = columns.Integer(primary_key=True) @@ -55,12 +58,14 @@ def test_table_definition(self): class FirstModel(Model): + __keyspace__ = 'test' __table_name__ = 'first_model' first_key = columns.UUID(primary_key=True) second_key = columns.UUID() third_key = columns.Text() class SecondModel(Model): + __keyspace__ = 'test' __table_name__ = 'first_model' first_key = columns.UUID(primary_key=True) second_key = columns.UUID() @@ -68,6 +73,7 @@ class SecondModel(Model): fourth_key = columns.Text() class ThirdModel(Model): + __keyspace__ = 'test' __table_name__ = 'first_model' first_key = columns.UUID(primary_key=True) second_key = columns.UUID() @@ -76,6 +82,7 @@ class ThirdModel(Model): blah = columns.Map(columns.Text, columns.Text) class FourthModel(Model): + __keyspace__ = 'test' __table_name__ = 'first_model' first_key = columns.UUID(primary_key=True) second_key = columns.UUID() @@ -109,6 +116,7 @@ def test_add_column(self): class ModelWithTableProperties(Model): + __keyspace__ = 'test' # Set random table properties __bloom_filter_fp_chance__ = 0.76328 
__caching__ = CACHING_ALL diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index 58b6112995..06195ed66c 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -20,6 +20,7 @@ def test_column_attributes_handled_correctly(self): """ class TestModel(Model): + __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) text = columns.Text() @@ -41,6 +42,7 @@ def test_db_map(self): -the db_map allows columns """ class WildDBNames(Model): + __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) content = columns.Text(db_field='words_and_whatnot') numbers = columns.Integer(db_field='integers_etc') @@ -65,6 +67,7 @@ def test_column_ordering_is_preserved(self): """ class Stuff(Model): + __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) words = columns.Text() content = columns.Text() @@ -75,6 +78,7 @@ class Stuff(Model): def test_exception_raised_when_creating_class_without_pk(self): with self.assertRaises(ModelDefinitionException): class TestModel(Model): + __keyspace__ = 'test' count = columns.Integer() text = columns.Text(required=False) @@ -84,6 +88,7 @@ def test_value_managers_are_keeping_model_instances_isolated(self): Tests that instance value managers are isolated from other instances """ class Stuff(Model): + __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) num = columns.Integer() @@ -99,6 +104,7 @@ def test_superclass_fields_are_inherited(self): Tests that fields defined on the super class are inherited properly """ class TestModel(Model): + __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) text = columns.Text() @@ -111,6 +117,7 @@ class InheritedModel(TestModel): def test_column_family_name_generation(self): """ Tests that auto column family name generation works as expected """ class 
TestModel(Model): + __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) text = columns.Text() @@ -146,6 +153,7 @@ def test_partition_keys(self): Test compound partition key definition """ class ModelWithPartitionKeys(cqlengine.Model): + __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) c1 = cqlengine.Text(primary_key=True) p1 = cqlengine.Text(partition_key=True) @@ -167,6 +175,7 @@ class ModelWithPartitionKeys(cqlengine.Model): def test_del_attribute_is_assigned_properly(self): """ Tests that columns that can be deleted have the del attribute """ class DelModel(Model): + __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) key = columns.Integer(primary_key=True) data = columns.Integer(required=False) @@ -180,9 +189,11 @@ def test_does_not_exist_exceptions_are_not_shared_between_model(self): """ Tests that DoesNotExist exceptions are not the same exception between models """ class Model1(Model): + __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) class Model2(Model): + __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) try: @@ -196,6 +207,7 @@ class Model2(Model): def test_does_not_exist_inherits_from_superclass(self): """ Tests that a DoesNotExist exception can be caught by it's parent class DoesNotExist """ class Model1(Model): + __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) class Model2(Model1): @@ -248,6 +260,7 @@ class ConcreteModel(AbstractModel): data = columns.Integer() class AbstractModelWithCol(Model): + __keyspace__ = 'test' __abstract__ = True pkey = columns.Integer(primary_key=True) @@ -256,6 +269,7 @@ class ConcreteModelWithCol(AbstractModelWithCol): class AbstractModelWithFullCols(Model): __abstract__ = True + __keyspace__ = 'test' pkey = columns.Integer(primary_key=True) data = columns.Integer() @@ -328,6 +342,7 @@ def create(iself, **kwargs): class 
CQModel(Model): __queryset__ = QSet + __keyspace__ = 'test' part = columns.UUID(primary_key=True) data = columns.Text() @@ -341,6 +356,7 @@ def save(iself): raise self.TestException class CDQModel(Model): + __keyspace__ = 'test' __dmlquery__ = DMLQ part = columns.UUID(primary_key=True) data = columns.Text() diff --git a/cqlengine/tests/model/test_equality_operations.py b/cqlengine/tests/model/test_equality_operations.py index c851e6089b..eeb8992b98 100644 --- a/cqlengine/tests/model/test_equality_operations.py +++ b/cqlengine/tests/model/test_equality_operations.py @@ -8,6 +8,7 @@ from cqlengine import columns class TestModel(Model): + __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() text = columns.Text(required=False) diff --git a/cqlengine/tests/model/test_model.py b/cqlengine/tests/model/test_model.py index 1e18359ec0..1bd143d01c 100644 --- a/cqlengine/tests/model/test_model.py +++ b/cqlengine/tests/model/test_model.py @@ -10,6 +10,7 @@ class TestModel(TestCase): def test_instance_equality(self): """ tests the model equality functionality """ class EqualityModel(Model): + __keyspace__ = 'test' pk = columns.Integer(primary_key=True) m0 = EqualityModel(pk=0) @@ -21,9 +22,11 @@ class EqualityModel(Model): def test_model_equality(self): """ tests the model equality functionality """ class EqualityModel0(Model): + __keyspace__ = 'test' pk = columns.Integer(primary_key=True) class EqualityModel1(Model): + __keyspace__ = 'test' kk = columns.Integer(primary_key=True) m0 = EqualityModel0(pk=0) @@ -40,6 +43,7 @@ def test_model_with_attribute_name_conflict(self): """should raise exception when model defines column that conflicts with built-in attribute""" with self.assertRaises(ModelDefinitionException): class IllegalTimestampColumnModel(Model): + __keyspace__ = 'test' my_primary_key = columns.Integer(primary_key=True) timestamp = columns.BigInt() @@ -47,5 +51,6 @@ def test_model_with_method_name_conflict(self): 
"""should raise exception when model defines column that conflicts with built-in method""" with self.assertRaises(ModelDefinitionException): class IllegalFilterColumnModel(Model): + __keyspace__ = 'test' my_primary_key = columns.Integer(primary_key=True) filter = columns.Text() \ No newline at end of file diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index e114d75a86..1faaaf029a 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -11,12 +11,14 @@ from cqlengine import columns class TestModel(Model): + __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() text = columns.Text(required=False) a_bool = columns.Boolean(default=False) class TestModel(Model): + __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() text = columns.Text(required=False) @@ -115,6 +117,7 @@ def test_a_sensical_error_is_raised_if_you_try_to_create_a_table_twice(self): class TestMultiKeyModel(Model): + __keyspace__ = 'test' partition = columns.Integer(primary_key=True) cluster = columns.Integer(primary_key=True) count = columns.Integer(required=False) @@ -240,6 +243,7 @@ def test_success_case(self): class IndexDefinitionModel(Model): + __keyspace__ = 'test' key = columns.UUID(primary_key=True) val = columns.Text(index=True) @@ -250,6 +254,7 @@ def test_exception_isnt_raised_if_an_index_is_defined_more_than_once(self): sync_table(IndexDefinitionModel) class ReservedWordModel(Model): + __keyspace__ = 'test' token = columns.Text(primary_key=True) insert = columns.Integer(index=True) @@ -270,6 +275,7 @@ def test_reserved_cql_words_can_be_used_as_column_names(self): class TestQueryModel(Model): + __keyspace__ = 'test' test_id = columns.UUID(primary_key=True, default=uuid4) date = columns.Date(primary_key=True) description = columns.Text() @@ -304,6 +310,7 @@ def test_query_with_date(self): def 
test_none_filter_fails(): class NoneFilterModel(Model): + __keyspace__ = 'test' pk = columns.Integer(primary_key=True) v = columns.Integer() sync_table(NoneFilterModel) diff --git a/cqlengine/tests/model/test_polymorphism.py b/cqlengine/tests/model/test_polymorphism.py index 28c3dccb82..450a6ed4cc 100644 --- a/cqlengine/tests/model/test_polymorphism.py +++ b/cqlengine/tests/model/test_polymorphism.py @@ -14,6 +14,7 @@ def test_multiple_polymorphic_key_failure(self): """ Tests that defining a model with more than one polymorphic key fails """ with self.assertRaises(models.ModelDefinitionException): class M(models.Model): + __keyspace__ = 'test' partition = columns.Integer(primary_key=True) type1 = columns.Integer(polymorphic_key=True) type2 = columns.Integer(polymorphic_key=True) @@ -21,6 +22,7 @@ class M(models.Model): def test_polymorphic_key_inheritance(self): """ Tests that polymorphic_key attribute is not inherited """ class Base(models.Model): + __keyspace__ = 'test' partition = columns.Integer(primary_key=True) type1 = columns.Integer(polymorphic_key=True) @@ -35,6 +37,7 @@ class M2(M1): def test_polymorphic_metaclass(self): """ Tests that the model meta class configures polymorphic models properly """ class Base(models.Model): + __keyspace__ = 'test' partition = columns.Integer(primary_key=True) type1 = columns.Integer(polymorphic_key=True) @@ -55,6 +58,7 @@ class M1(Base): def test_table_names_are_inherited_from_poly_base(self): class Base(models.Model): + __keyspace__ = 'test' partition = columns.Integer(primary_key=True) type1 = columns.Integer(polymorphic_key=True) @@ -66,11 +70,13 @@ class M1(Base): def test_collection_columns_cant_be_polymorphic_keys(self): with self.assertRaises(models.ModelDefinitionException): class Base(models.Model): + __keyspace__ = 'test' partition = columns.Integer(primary_key=True) type1 = columns.Set(columns.Integer, polymorphic_key=True) class PolyBase(models.Model): + __keyspace__ = 'test' partition = 
columns.UUID(primary_key=True, default=uuid.uuid4) row_type = columns.Integer(polymorphic_key=True) @@ -137,6 +143,7 @@ def test_delete_on_polymorphic_subclass_does_not_include_polymorphic_key(self): class UnindexedPolyBase(models.Model): + __keyspace__ = 'test' partition = columns.UUID(primary_key=True, default=uuid.uuid4) cluster = columns.UUID(primary_key=True, default=uuid.uuid4) row_type = columns.Integer(polymorphic_key=True) @@ -194,6 +201,7 @@ def test_conflicting_type_results(self): class IndexedPolyBase(models.Model): + __keyspace__ = 'test' partition = columns.UUID(primary_key=True, default=uuid.uuid4) cluster = columns.UUID(primary_key=True, default=uuid.uuid4) row_type = columns.Integer(polymorphic_key=True, index=True) diff --git a/cqlengine/tests/model/test_updates.py b/cqlengine/tests/model/test_updates.py index 6141436c8f..4ffc817efd 100644 --- a/cqlengine/tests/model/test_updates.py +++ b/cqlengine/tests/model/test_updates.py @@ -10,6 +10,7 @@ class TestUpdateModel(Model): + __keyspace__ = 'test' partition = columns.UUID(primary_key=True, default=uuid4) cluster = columns.UUID(primary_key=True, default=uuid4) count = columns.Integer(required=False) diff --git a/cqlengine/tests/model/test_value_lists.py b/cqlengine/tests/model/test_value_lists.py index 8d4c76de5c..306fa28d89 100644 --- a/cqlengine/tests/model/test_value_lists.py +++ b/cqlengine/tests/model/test_value_lists.py @@ -8,10 +8,12 @@ class TestModel(Model): + __keyspace__ = 'test' id = columns.Integer(primary_key=True) clustering_key = columns.Integer(primary_key=True, clustering_order='desc') class TestClusteringComplexModel(Model): + __keyspace__ = 'test' id = columns.Integer(primary_key=True) clustering_key = columns.Integer(primary_key=True, clustering_order='desc') some_value = columns.Integer() diff --git a/cqlengine/tests/query/test_batch_query.py b/cqlengine/tests/query/test_batch_query.py index 1d7e1804db..9f3ecb5d6a 100644 --- a/cqlengine/tests/query/test_batch_query.py +++ 
b/cqlengine/tests/query/test_batch_query.py @@ -8,12 +8,14 @@ from cqlengine.tests.base import BaseCassEngTestCase class TestMultiKeyModel(Model): + __keyspace__ = 'test' partition = columns.Integer(primary_key=True) cluster = columns.Integer(primary_key=True) count = columns.Integer(required=False) text = columns.Text(required=False) class BatchQueryLogModel(Model): + __keyspace__ = 'test' # simple k/v table k = columns.Integer(primary_key=True) v = columns.Integer() diff --git a/cqlengine/tests/query/test_datetime_queries.py b/cqlengine/tests/query/test_datetime_queries.py index c6cb0627b7..8179b0f290 100644 --- a/cqlengine/tests/query/test_datetime_queries.py +++ b/cqlengine/tests/query/test_datetime_queries.py @@ -11,6 +11,7 @@ from cqlengine import query class DateTimeQueryTestModel(Model): + __keyspace__ = 'test' user = columns.Integer(primary_key=True) day = columns.DateTime(primary_key=True) data = columns.Text() diff --git a/cqlengine/tests/query/test_queryoperators.py b/cqlengine/tests/query/test_queryoperators.py index 01cce27fc8..5572d00c56 100644 --- a/cqlengine/tests/query/test_queryoperators.py +++ b/cqlengine/tests/query/test_queryoperators.py @@ -39,6 +39,7 @@ def test_mintimeuuid_function(self): class TokenTestModel(Model): + __keyspace__ = 'test' key = columns.Integer(primary_key=True) val = columns.Integer() @@ -74,6 +75,7 @@ def test_token_function(self): def test_compound_pk_token_function(self): class TestModel(Model): + __keyspace__ = 'test' p1 = columns.Text(partition_key=True) p2 = columns.Text(partition_key=True) diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index ced3891261..feb8905bbc 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -39,6 +39,7 @@ def dst(self, dt): class TestModel(Model): + __keyspace__ = 'test' test_id = columns.Integer(primary_key=True) attempt_id = columns.Integer(primary_key=True) description = columns.Text() @@ -47,6 
+48,7 @@ class TestModel(Model): class IndexedTestModel(Model): + __keyspace__ = 'test' test_id = columns.Integer(primary_key=True) attempt_id = columns.Integer(index=True) description = columns.Text() @@ -55,6 +57,7 @@ class IndexedTestModel(Model): class TestMultiClusteringModel(Model): + __keyspace__ = 'test' one = columns.Integer(primary_key=True) two = columns.Integer(primary_key=True) three = columns.Integer(primary_key=True) @@ -373,6 +376,7 @@ def test_allow_filtering_flag(self): def test_non_quality_filtering(): class NonEqualityFilteringModel(Model): + __keyspace__ = 'test' example_id = columns.UUID(primary_key=True, default=uuid.uuid4) sequence_id = columns.Integer(primary_key=True) # sequence_id is a clustering key example_type = columns.Integer(index=True) @@ -542,6 +546,7 @@ def test_conn_is_returned_after_filling_cache(self): class TimeUUIDQueryModel(Model): + __keyspace__ = 'test' partition = columns.UUID(primary_key=True) time = columns.TimeUUID(primary_key=True) data = columns.Text(required=False) diff --git a/cqlengine/tests/query/test_updates.py b/cqlengine/tests/query/test_updates.py index d32755af8f..1037cf5a2a 100644 --- a/cqlengine/tests/query/test_updates.py +++ b/cqlengine/tests/query/test_updates.py @@ -9,6 +9,7 @@ class TestQueryUpdateModel(Model): + __keyspace__ = 'test' partition = columns.UUID(primary_key=True, default=uuid4) cluster = columns.Integer(primary_key=True) count = columns.Integer(required=False) diff --git a/cqlengine/tests/test_batch_query.py b/cqlengine/tests/test_batch_query.py index 9c2d4140c8..39204a594f 100644 --- a/cqlengine/tests/test_batch_query.py +++ b/cqlengine/tests/test_batch_query.py @@ -11,6 +11,7 @@ from cqlengine.tests.base import BaseCassEngTestCase class TestMultiKeyModel(Model): + __keyspace__ = 'test' partition = columns.Integer(primary_key=True) cluster = columns.Integer(primary_key=True) count = columns.Integer(required=False) diff --git a/cqlengine/tests/test_consistency.py 
b/cqlengine/tests/test_consistency.py index 9f0f3f86b3..a45117b00f 100644 --- a/cqlengine/tests/test_consistency.py +++ b/cqlengine/tests/test_consistency.py @@ -7,6 +7,7 @@ from cqlengine import ALL, BatchQuery class TestConsistencyModel(Model): + __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() text = columns.Text(required=False) diff --git a/cqlengine/tests/test_load.py b/cqlengine/tests/test_load.py index 16a42eac88..6e64d96d87 100644 --- a/cqlengine/tests/test_load.py +++ b/cqlengine/tests/test_load.py @@ -8,6 +8,7 @@ import gc class LoadTest(Model): + __keyspace__ = 'test' k = Integer(primary_key=True) v = Integer() diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index b1a6a2860d..cd298cf457 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -12,6 +12,7 @@ class TestTimestampModel(Model): + __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() diff --git a/cqlengine/tests/test_ttl.py b/cqlengine/tests/test_ttl.py index 45f865e0e6..bb5e1bb440 100644 --- a/cqlengine/tests/test_ttl.py +++ b/cqlengine/tests/test_ttl.py @@ -8,6 +8,7 @@ class TestTTLModel(Model): + __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() text = columns.Text(required=False) From f03bd5b2e534de0b394d095a606d17091b24535d Mon Sep 17 00:00:00 2001 From: Blake Eggleston Date: Thu, 3 Jul 2014 15:38:04 -0700 Subject: [PATCH 0817/3726] documenting the removal of the cqlengine keyspace --- README.md | 2 +- docs/index.rst | 4 +--- docs/topics/models.rst | 6 +++++- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 7965610d56..361fff385f 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ cqlengine cqlengine is a Cassandra CQL 3 Object Mapper for Python -**Users of versions < 0.4, please read this post: [Breaking 
Changes](https://groups.google.com/forum/?fromgroups#!topic/cqlengine-users/erkSNe1JwuU)** +**Users of versions < 0.16, the default keyspace 'cqlengine' has been removed. Please read this before upgrading:** [Breaking Changes](https://cqlengine.readthedocs.org/en/latest/topics/models.html#keyspace-change) [Documentation](https://cqlengine.readthedocs.org/en/latest/) diff --git a/docs/index.rst b/docs/index.rst index 99a08efc3a..7bb0e5e979 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -5,9 +5,7 @@ cqlengine documentation ======================= -**Users of versions < 0.4, please read this post before upgrading:** `Breaking Changes`_ - -.. _Breaking Changes: https://groups.google.com/forum/?fromgroups#!topic/cqlengine-users/erkSNe1JwuU +**Users of versions < 0.16, the default keyspace 'cqlengine' has been removed. Please read this before upgrading:** :ref:`Breaking Changes ` cqlengine is a Cassandra CQL 3 Object Mapper for Python diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 14f905b40d..c7f28f3ee8 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -169,9 +169,13 @@ Model Attributes *Optional.* Sets the name of the CQL table for this model. If left blank, the table name will be the name of the model, with it's module name as it's prefix. Manually defined table names are not inherited. + .. _keyspace-change: .. attribute:: Model.__keyspace__ - *Optional.* Sets the name of the keyspace used by this model. Defaults to cqlengine + Sets the name of the keyspace used by this model. + + **Prior to cqlengine 0.16, this setting defaulted + to 'cqlengine'. As of 0.16, this field needs to be set on all non-abstract models, or their base classes.** Table Polymorphism From 912ecb1131d3142549b0655909abae8d80281e3c Mon Sep 17 00:00:00 2001 From: Michael Cyrulnik Date: Fri, 4 Jul 2014 03:50:50 -0400 Subject: [PATCH 0818/3726] setup has retry_connect, which will retry connect on next execute if Cassandra was down during setup. 
--- cqlengine/connection.py | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index e22f7556dd..9f17fe8388 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -3,7 +3,7 @@ #http://cassandra.apache.org/doc/cql/CQL.html from collections import namedtuple -from cassandra.cluster import Cluster +from cassandra.cluster import Cluster, NoHostAvailable from cassandra.query import SimpleStatement, Statement try: @@ -35,6 +35,7 @@ def setup( default_keyspace, consistency=ConsistencyLevel.ONE, lazy_connect=False, + retry_connect=False, **kwargs): """ Records the hosts and connects to one of them @@ -59,11 +60,24 @@ def setup( default_consistency_level = consistency if lazy_connect: - lazy_connect_args = (hosts, default_keyspace, consistency, kwargs) + kwargs['default_keyspace'] = default_keyspace + kwargs['consistency'] = consistency + kwargs['lazy_connect'] = False + kwargs['retry_connect'] = retry_connect + lazy_connect_args = (hosts, kwargs) return cluster = Cluster(hosts, **kwargs) - session = cluster.connect() + try: + session = cluster.connect() + except NoHostAvailable: + if retry_connect: + kwargs['default_keyspace'] = default_keyspace + kwargs['consistency'] = consistency + kwargs['lazy_connect'] = False + kwargs['retry_connect'] = retry_connect + lazy_connect_args = (hosts, kwargs) + raise session.row_factory = dict_factory def execute(query, params=None, consistency_level=None): @@ -106,6 +120,6 @@ def get_cluster(): def handle_lazy_connect(): global lazy_connect_args if lazy_connect_args: - hosts, default_keyspace, consistency, kwargs = lazy_connect_args + hosts, kwargs = lazy_connect_args lazy_connect_args = None - setup(hosts, default_keyspace, consistency, **kwargs) + setup(hosts, **kwargs) From 8edb27bdc87611a9b39b762b20823c8b56d7896e Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 7 Jul 2014 14:19:28 -0700 Subject: [PATCH 0819/3726] fixed 
pagedresult issue --- cqlengine/connection.py | 10 ++++--- cqlengine/exceptions.py | 1 + cqlengine/models.py | 10 ------- cqlengine/query.py | 2 +- .../tests/model/test_class_construction.py | 9 ------- cqlengine/tests/query/test_queryset.py | 27 +++++++++++++++++-- 6 files changed, 33 insertions(+), 26 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index e22f7556dd..d5f6ed5c5e 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -14,7 +14,7 @@ import logging -from cqlengine.exceptions import CQLEngineException +from cqlengine.exceptions import CQLEngineException, UndefinedKeyspaceException from cassandra import ConsistencyLevel from cqlengine.statements import BaseCQLStatement from cassandra.query import dict_factory @@ -53,9 +53,11 @@ def setup( if 'username' in kwargs or 'password' in kwargs: raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider") - if default_keyspace: - from cqlengine import models - models.DEFAULT_KEYSPACE = default_keyspace + if not default_keyspace: + raise UndefinedKeyspaceException() + + from cqlengine import models + models.DEFAULT_KEYSPACE = default_keyspace default_consistency_level = consistency if lazy_connect: diff --git a/cqlengine/exceptions.py b/cqlengine/exceptions.py index 87b5cdc4c5..94a51b92ac 100644 --- a/cqlengine/exceptions.py +++ b/cqlengine/exceptions.py @@ -3,3 +3,4 @@ class CQLEngineException(Exception): pass class ModelException(CQLEngineException): pass class ValidationError(CQLEngineException): pass +class UndefinedKeyspaceException(CQLEngineException): pass diff --git a/cqlengine/models.py b/cqlengine/models.py index b800394994..d4ce1e7708 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -762,16 +762,6 @@ def _get_polymorphic_base(bases): #create the class and add a QuerySet to it klass = super(ModelMetaClass, cls).__new__(cls, name, bases, attrs) - if not klass.__abstract__ and klass.__keyspace__ is None: 
- warnings.warn( - "No keyspace defined on {}.{}\n" - " The default keyspace 'cqlengine' was removed in 0.16.\n" - " Please define a keyspace on the '__keyspace__' model class attribute\n" - " or set a default keyspace with management.setup function\n" - .format(klass.__module__, klass.__name__), - UndefinedKeyspaceWarning - ) - return klass diff --git a/cqlengine/query.py b/cqlengine/query.py index 0a64a5c78a..738822f3d5 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -293,7 +293,7 @@ def _execute_query(self): if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") if self._result_cache is None: - self._result_cache = self._execute(self._select_query()) + self._result_cache = list(self._execute(self._select_query())) self._construct_result = self._get_result_constructor() def _fill_result_cache_to_idx(self, idx): diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index 06195ed66c..37f0f81965 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -221,15 +221,6 @@ class Model2(Model1): except Exception: assert False, "Model2 exception should not be caught by Model1" - def test_no_keyspace_warning(self): - with warnings.catch_warnings(record=True) as warn: - class NoKeyspace(Model): - key = columns.UUID(primary_key=True) - - self.assertEqual(len(warn), 1) - warn_message = warn[0] - self.assertEqual(warn_message.category, UndefinedKeyspaceWarning) - def test_abstract_model_keyspace_warning_is_skipped(self): with warnings.catch_warnings(record=True) as warn: class NoKeyspace(Model): diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index feb8905bbc..1c9a3ed5f9 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -1,11 +1,11 @@ from datetime import datetime import time -from unittest import 
TestCase +from unittest import TestCase, skipUnless from uuid import uuid1, uuid4 import uuid from cqlengine.tests.base import BaseCassEngTestCase - +import mock from cqlengine.exceptions import ModelException from cqlengine import functions from cqlengine.management import sync_table, drop_table, sync_table @@ -20,6 +20,11 @@ from cqlengine import operators +from cqlengine.connection import get_cluster, get_session + +cluster = get_cluster() + + class TzOffset(tzinfo): """Minimal implementation of a timezone offset to help testing with timezone aware datetimes. @@ -684,3 +689,21 @@ def test_objects_property_returns_fresh_queryset(self): assert TestModel.objects._result_cache is None +@skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") +def test_paged_result_handling(): + # addresses #225 + class PagingTest(Model): + id = columns.Integer(primary_key=True) + val = columns.Integer() + sync_table(PagingTest) + + PagingTest.create(id=1, val=1) + PagingTest.create(id=2, val=2) + + session = get_session() + with mock.patch.object(session, 'default_fetch_size', 1): + results = PagingTest.objects()[:] + + assert len(results) == 2 + + From 73ea42d932bd76d0104ee780ef4dacc408478dde Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 7 Jul 2014 14:27:05 -0700 Subject: [PATCH 0820/3726] updated changelog --- changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog b/changelog index f6bd732333..2aa66d4186 100644 --- a/changelog +++ b/changelog @@ -2,6 +2,7 @@ CHANGELOG 0.16.0 + 225: No handling of PagedResult from execute 222: figure out how to make travis not totally fail when a test is skipped 220: delayed connect. 
use setup(delayed_connect=True) 218: throw exception on create_table and delete_table From 2610fd782be8da812fe5f92e57b9db72d403936f Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 7 Jul 2014 14:27:22 -0700 Subject: [PATCH 0821/3726] version bump --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 1282fff53b..04a373efe6 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.15.5 +0.16.0 From 7c5a608418d43f15ebd2089e0f71509595fd0ef6 Mon Sep 17 00:00:00 2001 From: Michael Cyrulnik Date: Tue, 8 Jul 2014 11:20:43 -0400 Subject: [PATCH 0822/3726] reorder session check in execute to allow lazy connect --- cqlengine/connection.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 0ad7e05ad2..1dd35fb4cb 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -84,11 +84,11 @@ def setup( def execute(query, params=None, consistency_level=None): + handle_lazy_connect() + if not session: raise CQLEngineException("It is required to setup() cqlengine before executing queries") - handle_lazy_connect() - if consistency_level is None: consistency_level = default_consistency_level @@ -103,14 +103,11 @@ def execute(query, params=None, consistency_level=None): elif isinstance(query, basestring): query = SimpleStatement(query, consistency_level=consistency_level) - - params = params or {} result = session.execute(query, params) return result - def get_session(): handle_lazy_connect() return session From 831b4a2ab9ae4ffb7d8605e62cae433426e17e7c Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 8 Jul 2014 09:14:43 -0700 Subject: [PATCH 0823/3726] version bump, makefile for releasing --- Makefile | 14 ++++++++++++++ cqlengine/VERSION | 2 +- 2 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 Makefile diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..061fc27426 
--- /dev/null +++ b/Makefile @@ -0,0 +1,14 @@ +clean: + find . -name *.pyc -delete + rm -rf cqlengine/__pycache__ + + +build: clean + python setup.py build + +release: clean + python setup.py sdist upload + + +.PHONY: build + diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 04a373efe6..2a0970ca75 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.16.0 +0.16.1 From d8aa5ff4881f630c857366be753b616ffe5aba2c Mon Sep 17 00:00:00 2001 From: lenolib Date: Thu, 10 Jul 2014 11:29:41 +0200 Subject: [PATCH 0824/3726] Fix lazy_connect, call handle_lazy_connect before checking session existence --- cqlengine/connection.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index d5f6ed5c5e..5b483e40aa 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -70,10 +70,11 @@ def setup( def execute(query, params=None, consistency_level=None): + handle_lazy_connect() + if not session: raise CQLEngineException("It is required to setup() cqlengine before executing queries") - handle_lazy_connect() if consistency_level is None: consistency_level = default_consistency_level From 168d52b5521be4d58ed9c8d5828b5935d9a249bb Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 15 Jul 2014 09:12:14 -0700 Subject: [PATCH 0825/3726] fixed docs --- docs/conf.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 1c01444817..82a8833b95 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -41,7 +41,7 @@ # General information about the project. project = u'cqlengine' -copyright = u'2012, Blake Eggleston' +copyright = u'2012, Blake Eggleston, Jon Haddad' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -185,7 +185,7 @@ # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'cqlengine.tex', u'cqlengine Documentation', u'Blake Eggleston', 'manual'), + ('index', 'cqlengine.tex', u'cqlengine Documentation', u'Blake Eggleston, Jon Haddad', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of @@ -215,7 +215,7 @@ # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'cqlengine', u'cqlengine Documentation', - [u'Blake Eggleston'], 1) + [u'Blake Eggleston, Jon Haddad'], 1) ] # If true, show URL addresses after external links. @@ -229,7 +229,7 @@ # dir menu entry, description, category) texinfo_documents = [ ('index', 'cqlengine', u'cqlengine Documentation', - u'Blake Eggleston', 'cqlengine', 'One line description of project.', + u'Blake Eggleston, Jon Haddad', 'cqlengine', 'One line description of project.', 'Miscellaneous'), ] From 3929b72aecfb1982da0c9748821c0c39fb7169fa Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 15 Jul 2014 20:01:06 -0700 Subject: [PATCH 0826/3726] added clustering_order --- docs/topics/models.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index c7f28f3ee8..c8683159f9 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -81,6 +81,9 @@ Column Options If True, this column is created as partition primary key. There may be many partition keys defined, forming a *composite partition key* + :attr:`~cqlengine.columns.BaseColumn.clustering_order` + ``ASC`` or ``DESC``, determines the clustering order of a clustering key. + :attr:`~cqlengine.columns.BaseColumn.index` If True, an index will be created for this column. Defaults to False. 
From 93586e48f2e12614dd767ebcd61efb37806eced0 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 15 Jul 2014 20:05:39 -0700 Subject: [PATCH 0827/3726] clustering keys example --- docs/topics/models.rst | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index c8683159f9..fcaf0631c5 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -38,6 +38,18 @@ The Person model would create this CQL table: PRIMARY KEY (id) ) +Here's an example of a comment table created with clustering keys, in descending order: + +.. code-block:: python + + from cqlengine import columns + from cqlengine.models import Model + + class Comment(Model): + photo_id = columns.UUID(primary_key=True) + comment_id = columns.TimeUUID(primary_key=True, clustering_order="DESC") + comment = columns.Text() + Columns ======= From cbc5f205500e1c8353907732164e80fc53e90655 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 15 Jul 2014 20:08:11 -0700 Subject: [PATCH 0828/3726] clustering example --- docs/topics/models.rst | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index fcaf0631c5..a4c9d88be1 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -50,6 +50,17 @@ Here's an example of a comment table created with clustering keys, in descending comment_id = columns.TimeUUID(primary_key=True, clustering_order="DESC") comment = columns.Text() +The Comment model's ``create table`` would look like the following: + +.. 
code-block:: sql + + CREATE TABLE comment ( + photo_id uuid, + comment_id timeuuid, + comment text, + PRIMARY KEY (photo_id, comment_id) + ) WITH CLUSTERING ORDER BY (comment_id DESC) + Columns ======= From 011dc2508d14b817176ebe856dc0b1f35f0dd54b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 15 Jul 2014 20:08:34 -0700 Subject: [PATCH 0829/3726] examples, plural --- docs/topics/models.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index a4c9d88be1..aea3be7207 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -12,8 +12,8 @@ Models A model is a python class representing a CQL table. -Example -======= +Examples +======== This example defines a Person table, with the columns ``first_name`` and ``last_name`` From 2d92a702805dd755fb24476413e8b70b12696634 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 15 Jul 2014 20:10:41 -0700 Subject: [PATCH 0830/3726] syncing examples --- docs/topics/models.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index aea3be7207..08d4cfb0be 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -61,6 +61,15 @@ The Comment model's ``create table`` would look like the following: PRIMARY KEY (photo_id, comment_id) ) WITH CLUSTERING ORDER BY (comment_id DESC) +To sync the models to the database, you may do the following: + +`` code-block:: python + + from cqlengine.management import sync_table + sync_table(Person) + sync_table(Comment) + + Columns ======= From b92bf2021bb0c6ae80544d041576487b7bcaa925 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 15 Jul 2014 20:11:19 -0700 Subject: [PATCH 0831/3726] fixed code block --- docs/topics/models.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 08d4cfb0be..1602590795 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -63,12 
+63,12 @@ The Comment model's ``create table`` would look like the following: To sync the models to the database, you may do the following: -`` code-block:: python +.. code-block:: python from cqlengine.management import sync_table sync_table(Person) sync_table(Comment) - + Columns ======= From 5876f2a887d97123fee61d19379d5cada89a203b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 25 Jul 2014 12:14:34 -0700 Subject: [PATCH 0832/3726] logging queries --- cqlengine/connection.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 5b483e40aa..7cf8dea277 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -71,7 +71,7 @@ def setup( def execute(query, params=None, consistency_level=None): handle_lazy_connect() - + if not session: raise CQLEngineException("It is required to setup() cqlengine before executing queries") @@ -91,6 +91,7 @@ def execute(query, params=None, consistency_level=None): query = SimpleStatement(query, consistency_level=consistency_level) + LOG.info(query.query_string) params = params or {} result = session.execute(query, params) From 6c03b320d1c241db3b15d6475360fe0c3417e63f Mon Sep 17 00:00:00 2001 From: Greg Doermann Date: Wed, 30 Jul 2014 18:17:28 -0600 Subject: [PATCH 0833/3726] Reverted the changes I made as everything I had done is in the code base now. 
--- cqlengine/__init__.py | 2 +- cqlengine/columns.py | 14 +++++++++----- cqlengine/functions.py | 15 --------------- cqlengine/models.py | 16 +++------------- 4 files changed, 13 insertions(+), 34 deletions(-) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index ecec3cd065..ef273d9664 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -4,7 +4,7 @@ from cqlengine.columns import * from cqlengine.functions import * -from cqlengine.models import Model, CounterModel +from cqlengine.models import Model from cqlengine.query import BatchQuery diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 9e92eefed8..66286d38e8 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -473,11 +473,15 @@ def to_database(self, value): class Decimal(Column): db_type = 'decimal' -class Counter(Column): - #TODO: counter field - def __init__(self, **kwargs): - super(Counter, self).__init__(**kwargs) - raise NotImplementedError + def validate(self, value): + from decimal import Decimal as _Decimal + from decimal import InvalidOperation + val = super(Decimal, self).validate(value) + if val is None: return + try: + return _Decimal(val) + except InvalidOperation: + raise ValidationError("'{}' can't be coerced to decimal".format(val)) def to_python(self, value): return self.validate(value) diff --git a/cqlengine/functions.py b/cqlengine/functions.py index 164bfd4134..b5f6a5af71 100644 --- a/cqlengine/functions.py +++ b/cqlengine/functions.py @@ -116,18 +116,3 @@ def update_context(self, ctx): for i, (col, val) in enumerate(zip(self._columns, self.value)): ctx[str(self.context_id + i)] = col.to_database(val) - -class NotSet(object): - """ - Different from None, so you know when it just wasn't set compared to passing in None - """ - pass - - -def format_timestamp(timestamp): - if isinstance(timestamp, datetime): - epoch = datetime(1970, 1, 1) - ts = long((timestamp - epoch).total_seconds() * 1000) - else : - ts = long(timestamp) - return ts diff --git 
a/cqlengine/models.py b/cqlengine/models.py index ae0c7e4d04..d4ce1e7708 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -8,7 +8,6 @@ from cqlengine.query import DoesNotExist as _DoesNotExist from cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned - class ModelDefinitionException(ModelException): pass @@ -279,6 +278,9 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass def __init__(self, **values): self._values = {} + self._ttl = None + self._timestamp = None + for name, column in self._columns.items(): value = values.get(name, None) if value is not None or isinstance(column, columns.BaseContainerColumn): @@ -772,15 +774,3 @@ class Model(BaseModel): __metaclass__ = ModelMetaClass -class CounterBaseModel(BaseModel): - def _can_update(self): - # INSERT is not allowed for counter column families - return True - - -class CounterModel(CounterBaseModel): - """ - the db name for the column family can be set as the attribute db_name, or - it will be genertaed from the class name - """ - __metaclass__ = ModelMetaClass From 0f827bc5e32ed06af4db3f1ef75382b5f3840a12 Mon Sep 17 00:00:00 2001 From: Greg Doermann Date: Wed, 30 Jul 2014 19:28:28 -0600 Subject: [PATCH 0834/3726] Allow for a default TTL. 
--- cqlengine/models.py | 6 +++--- cqlengine/query.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index d4ce1e7708..66e956ad6c 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -278,7 +278,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass def __init__(self, **values): self._values = {} - self._ttl = None + self._ttl = getattr(self, 'DEFAULT_TTL', None) self._timestamp = None for name, column in self._columns.items(): @@ -535,7 +535,7 @@ def save(self): v.reset_previous_value() self._is_persisted = True - self._ttl = None + self._ttl = getattr(self, 'DEFAULT_TTL', None) self._timestamp = None return self @@ -573,7 +573,7 @@ def update(self, **values): v.reset_previous_value() self._is_persisted = True - self._ttl = None + self._ttl = getattr(self, 'DEFAULT_TTL', None) self._timestamp = None return self diff --git a/cqlengine/query.py b/cqlengine/query.py index 738822f3d5..f847b85b1b 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -219,7 +219,7 @@ def __init__(self, model): self._result_idx = None self._batch = None - self._ttl = None + self._ttl = getattr(self, 'DEFAULT_TTL', None) self._consistency = None self._timestamp = None From 3ae0d633b26f9efc3a4fbd8da24d6737802d898e Mon Sep 17 00:00:00 2001 From: Greg Doermann Date: Thu, 31 Jul 2014 10:15:06 -0600 Subject: [PATCH 0835/3726] Changed DEFAULT_TTL to __default_ttl__ and added tests. 
--- cqlengine/models.py | 8 ++--- cqlengine/query.py | 2 +- cqlengine/tests/test_ttl.py | 62 +++++++++++++++++++++++++++++++++++++ 3 files changed, 67 insertions(+), 5 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 66e956ad6c..09612b2e7b 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -258,7 +258,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass __queryset__ = ModelQuerySet __dmlquery__ = DMLQuery - #__ttl__ = None # this doesn't seem to be used + __default_ttl__ = None # default ttl value to use __consistency__ = None # can be set per query # Additional table properties @@ -278,7 +278,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass def __init__(self, **values): self._values = {} - self._ttl = getattr(self, 'DEFAULT_TTL', None) + self._ttl = self.__default_ttl__ self._timestamp = None for name, column in self._columns.items(): @@ -535,7 +535,7 @@ def save(self): v.reset_previous_value() self._is_persisted = True - self._ttl = getattr(self, 'DEFAULT_TTL', None) + self._ttl = self.__default_ttl__ self._timestamp = None return self @@ -573,7 +573,7 @@ def update(self, **values): v.reset_previous_value() self._is_persisted = True - self._ttl = getattr(self, 'DEFAULT_TTL', None) + self._ttl = self.__default_ttl__ self._timestamp = None return self diff --git a/cqlengine/query.py b/cqlengine/query.py index f847b85b1b..0f039f9365 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -219,7 +219,7 @@ def __init__(self, model): self._result_idx = None self._batch = None - self._ttl = getattr(self, 'DEFAULT_TTL', None) + self._ttl = getattr(model, '__default_ttl__', None) self._consistency = None self._timestamp = None diff --git a/cqlengine/tests/test_ttl.py b/cqlengine/tests/test_ttl.py index bb5e1bb440..66fa8334ab 100644 --- a/cqlengine/tests/test_ttl.py +++ b/cqlengine/tests/test_ttl.py @@ -27,6 +27,28 @@ def tearDownClass(cls): drop_table(TestTTLModel) +class TestDefaultTTLModel(Model): + 
__keyspace__ = 'test' + __default_ttl__ = 20 + id = columns.UUID(primary_key=True, default=lambda:uuid4()) + count = columns.Integer() + text = columns.Text(required=False) + + +class BaseDefaultTTLTest(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(BaseDefaultTTLTest, cls).setUpClass() + sync_table(TestDefaultTTLModel) + sync_table(TestTTLModel) + + @classmethod + def tearDownClass(cls): + super(BaseDefaultTTLTest, cls).tearDownClass() + drop_table(TestDefaultTTLModel) + drop_table(TestTTLModel) + class TTLQueryTests(BaseTTLTest): @@ -117,5 +139,45 @@ def test_ttl_included_with_blind_update(self): self.assertIn("USING TTL", query) +class TTLDefaultTest(BaseDefaultTTLTest): + def test_default_ttl_not_set(self): + session = get_session() + + o = TestTTLModel.create(text="some text") + tid = o.id + + self.assertIsNone(o._ttl) + + with mock.patch.object(session, 'execute') as m: + TestTTLModel.objects(id=tid).update(text="aligators") + + query = m.call_args[0][0].query_string + self.assertNotIn("USING TTL", query) + + def test_default_ttl_set(self): + session = get_session() + o = TestDefaultTTLModel.create(text="some text on ttl") + tid = o.id + + self.assertEqual(o._ttl, TestDefaultTTLModel.__default_ttl__) + + with mock.patch.object(session, 'execute') as m: + TestDefaultTTLModel.objects(id=tid).update(text="aligators expired") + + query = m.call_args[0][0].query_string + self.assertIn("USING TTL", query) + + def test_override_default_ttl(self): + session = get_session() + o = TestDefaultTTLModel.create(text="some text on ttl") + tid = o.id + + self.assertEqual(o._ttl, TestDefaultTTLModel.__default_ttl__) + o.ttl(3600) + self.assertEqual(o._ttl, 3600) + with mock.patch.object(session, 'execute') as m: + TestDefaultTTLModel.objects(id=tid).ttl(None).update(text="aligators expired") + query = m.call_args[0][0].query_string + self.assertNotIn("USING TTL", query) \ No newline at end of file From b8dd705c4a5b0162634ebc26ca78c687213c427e Mon Sep 17 
00:00:00 2001 From: Greg Doermann Date: Thu, 31 Jul 2014 10:19:50 -0600 Subject: [PATCH 0836/3726] Added docs around __default_ttl__ --- docs/topics/models.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 1602590795..83ac6979c8 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -212,6 +212,11 @@ Model Attributes **Prior to cqlengine 0.16, this setting defaulted to 'cqlengine'. As of 0.16, this field needs to be set on all non-abstract models, or their base classes.** + .. _ttl-change: + .. attribute:: Model.__default_ttl__ + + Sets the default ttl used by this model. This can be overridden by using the ``ttl(ttl_in_sec)`` method. + Table Polymorphism ================== From db216c4cb07909f5dffc0adfad52c1662ccb302d Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Wed, 6 Aug 2014 09:37:04 +0200 Subject: [PATCH 0837/3726] Fix caching documentation. --- docs/topics/models.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 1602590795..0da09e77f7 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -331,11 +331,11 @@ Table Properties .. 
code-block:: python - from cqlengine import ROWS_ONLY, columns + from cqlengine import CACHING_ROWS_ONLY, columns from cqlengine.models import Model class User(Model): - __caching__ = ROWS_ONLY # cache only rows instead of keys only by default + __caching__ = CACHING_ROWS_ONLY # cache only rows instead of keys only by default __gc_grace_seconds__ = 86400 # 1 day instead of the default 10 days user_id = columns.UUID(primary_key=True) From be8ec75086b7ad5e6c4b3994e76a914e07606129 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 8 Aug 2014 11:01:07 -0700 Subject: [PATCH 0838/3726] updated changelog and comment on setup --- changelog | 6 ++++++ cqlengine/connection.py | 2 ++ 2 files changed, 8 insertions(+) diff --git a/changelog b/changelog index 2aa66d4186..2496a70271 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,11 @@ CHANGELOG + +0.17.0 + +* retry_connect on setup() +* optional default TTL on model + 0.16.0 225: No handling of PagedResult from execute diff --git a/cqlengine/connection.py b/cqlengine/connection.py index fbe424cb27..4a6a426cef 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -48,6 +48,8 @@ def setup( :type consistency: int :param lazy_connect: True if should not connect until first use :type lazy_connect: bool + :param retry_connect: bool + :param retry_connect: True if we should retry to connect even if there was a connection failure initially """ global cluster, session, default_consistency_level, lazy_connect_args From c4955060554c1eb1520194c037fe20acdd6630fa Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 8 Aug 2014 11:03:29 -0700 Subject: [PATCH 0839/3726] added caching docs to changelog --- changelog | 2 ++ 1 file changed, 2 insertions(+) diff --git a/changelog b/changelog index 2496a70271..e044e0b7fc 100644 --- a/changelog +++ b/changelog @@ -5,6 +5,8 @@ CHANGELOG * retry_connect on setup() * optional default TTL on model +* fixed caching documentation + 0.16.0 From 9b2009830e0acfaf2177edf010805b1bb72ab60d Mon 
Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 8 Aug 2014 11:20:20 -0700 Subject: [PATCH 0840/3726] version bump --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 2a0970ca75..c5523bd09b 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.16.1 +0.17.0 From 40eb10ce2f113e069d3a80faba9353321a76dd70 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 12 Aug 2014 16:10:32 -0700 Subject: [PATCH 0841/3726] added primary key to examples --- docs/topics/models.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 6545317969..d0e1a05f00 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -23,6 +23,7 @@ This example defines a Person table, with the columns ``first_name`` and ``last_ from cqlengine.models import Model class Person(Model): + id = columns.UUID(primary_key=True) first_name = columns.Text() last_name = columns.Text() @@ -145,6 +146,7 @@ Model Methods #using the person model from earlier: class Person(Model): + id = columns.UUID(primary_key=True) first_name = columns.Text() last_name = columns.Text() From 927e3ad28eaa54f99c6f71dfa4c60e86826c4e71 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 12 Aug 2014 16:20:41 -0700 Subject: [PATCH 0842/3726] fixed example setup() call --- docs/index.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 7bb0e5e979..deda30ef0e 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -53,10 +53,10 @@ Getting Started # see http://datastax.github.io/python-driver/api/cassandra/cluster.html for options # the list of hosts will be passed to create a Cluster() instance - >>> connection.setup(['127.0.0.1']) + >>> connection.setup(['127.0.0.1'], "cqlengine") # if you're connecting to a 1.2 cluster - >>> connection.setup(['127.0.0.1'], protocol_version=1) + >>> connection.setup(['127.0.0.1'], "cqlengine", 
protocol_version=1) #...and create your CQL table >>> from cqlengine.management import sync_table From db892d011e56137c2bd50341cd7a371c6b5651d0 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 12 Aug 2014 16:22:58 -0700 Subject: [PATCH 0843/3726] fixed example --- docs/topics/connection.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/topics/connection.rst b/docs/topics/connection.rst index 11eeda39e0..658b3fc439 100644 --- a/docs/topics/connection.rst +++ b/docs/topics/connection.rst @@ -16,6 +16,9 @@ If there is a problem with one of the servers, cqlengine will try to connect to :param hosts: list of hosts, strings in the :, or just :type hosts: list + :param default_keyspace: keyspace to default to + :type default_keyspace: str + :param consistency: the consistency level of the connection, defaults to 'ONE' :type consistency: int From 25ed60b2a82a43c86d889d7272e5f5c4cadabef4 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 12 Aug 2014 16:24:06 -0700 Subject: [PATCH 0844/3726] fixed readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 361fff385f..e417b452b5 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ cqlengine is a Cassandra CQL 3 Object Mapper for Python [Documentation](https://cqlengine.readthedocs.org/en/latest/) -[Report a Bug](https://github.com/bdeggleston/cqlengine/issues) +[Report a Bug](https://github.com/cqlengine/cqlengine/issues) [Users Mailing List](https://groups.google.com/forum/?fromgroups#!forum/cqlengine-users) From 856fd29bdd8f0bbf50437d48e59ee70c13f24768 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 12 Aug 2014 16:25:20 -0700 Subject: [PATCH 0845/3726] fixed readme --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index e417b452b5..e4fef0a806 100644 --- a/README.md +++ b/README.md @@ -31,12 +31,12 @@ class ExampleModel(Model): created_at = columns.DateTime() description = 
columns.Text(required=False) -#next, setup the connection to your cassandra server(s)... +#next, setup the connection to your cassandra server(s) and the default keyspace... >>> from cqlengine import connection ->>> connection.setup(['127.0.0.1']) +>>> connection.setup(['127.0.0.1'], "cqlengine") # or if you're still on cassandra 1.2 ->>> connection.setup(['127.0.0.1'], protocol_version=1) +>>> connection.setup(['127.0.0.1'], "cqlengine", protocol_version=1) #...and create your CQL table >>> from cqlengine.management import sync_table From 89026db90d41392a754ea58de8c4aca412aca639 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 12 Aug 2014 16:59:44 -0700 Subject: [PATCH 0846/3726] bumped required driver version to 2.1 --- requirements.txt | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index e648425ad6..b9bac81778 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,4 +3,4 @@ ipdb Sphinx mock sure==1.2.5 -cassandra-driver>=2.0.0 +cassandra-driver>=2.1.0 diff --git a/setup.py b/setup.py index a427f36bee..afb7e12611 100644 --- a/setup.py +++ b/setup.py @@ -31,7 +31,7 @@ "Topic :: Software Development :: Libraries :: Python Modules", ], keywords='cassandra,cql,orm', - install_requires = ['cassandra-driver >= 2.0.0'], + install_requires = ['cassandra-driver >= 2.1.0'], author='Blake Eggleston, Jon Haddad', author_email='bdeggleston@gmail.com', url='https://github.com/cqlengine/cqlengine', From 860a8f830e698ca3151dcc7e60bd09cd785d5939 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 12 Aug 2014 17:18:49 -0700 Subject: [PATCH 0847/3726] inet address support --- cqlengine/columns.py | 3 +++ cqlengine/tests/columns/test_validation.py | 23 ++++++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 66286d38e8..e02ed26217 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -224,6 +224,9 @@ def to_python(self, value): 
class Ascii(Column): db_type = 'ascii' +class Inet(Column): + db_type = 'inet' + class Text(Column): db_type = 'text' diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 0df006e8e9..1eb2b232dc 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -5,6 +5,7 @@ from decimal import Decimal as D from unittest import TestCase from uuid import uuid4, uuid1 +from cassandra import InvalidRequest from cqlengine import ValidationError from cqlengine.connection import execute @@ -23,6 +24,7 @@ from cqlengine.columns import Boolean from cqlengine.columns import Float from cqlengine.columns import Decimal +from cqlengine.columns import Inet from cqlengine.management import sync_table, drop_table from cqlengine.models import Model @@ -345,3 +347,24 @@ def test_conversion_specific_date(self): # checks that we created a UUID1 with the proper timestamp assert new_dt == dt +class TestInet(BaseCassEngTestCase): + + class InetTestModel(Model): + id = UUID(primary_key=True, default=uuid4) + address = Inet() + + def setUp(self): + drop_table(self.InetTestModel) + sync_table(self.InetTestModel) + + def test_inet_saves(self): + tmp = self.InetTestModel.create(address="192.168.1.1") + + m = self.InetTestModel.get(id=tmp.id) + + assert m.address == "192.168.1.1" + + def test_non_address_fails(self): + with self.assertRaises(InvalidRequest): + self.InetTestModel.create(address="ham sandwich") + From 610a49ba4f65ca65ef2dff0f887aa506d38984a4 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 12 Aug 2014 17:45:54 -0700 Subject: [PATCH 0848/3726] added python 3.4 support and working to figure out how to get multiple verisions of Cassandra --- .travis.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.travis.yml b/.travis.yml index 172874a6f5..565665c484 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,11 +1,20 @@ language: python + +env: + - C_VERSION=1.2 + - C_VERSION=2.0 
+ python: - "2.7" + - "3.4" + services: - cassandra + before_install: - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /usr/local/cassandra/conf/cassandra-env.sh" - sudo service cassandra start + install: - "pip install -r requirements.txt --use-mirrors" #- "pip install pytest --use-mirrors" From 21f5437b2a6e0c19081f59118400032440ecb44b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 12 Aug 2014 18:26:05 -0700 Subject: [PATCH 0849/3726] updating travis to use debian repos for C testing --- .travis.yml | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index 565665c484..6fc3e61f03 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,17 +1,25 @@ language: python env: - - C_VERSION=1.2 - - C_VERSION=2.0 + - C_VERSION=12 + - C_VERSION=20 python: - "2.7" - "3.4" -services: - - cassandra +#services: # removed to try to get before_install to use custom versions +# - cassandra before_install: + - echo "deb http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" > /etc/apt/sources.list.d/cassandra.list + - echo "deb-src http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" >> /etc/apt/sources.list.d/cassandra.list + - gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D + - gpg --export --armor F758CE318D77295D | apt-key add - + - gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00 + - gpg --export --armor 2B5C1B00 | sudo apt-key add - + - apt-get update + - apt-get install -y cassandra - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /usr/local/cassandra/conf/cassandra-env.sh" - sudo service cassandra start From cfc0c32596237fd83f8c59e2f6528b4ac46a6c58 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 12 Aug 2014 18:28:37 -0700 Subject: [PATCH 0850/3726] sudo everywhere apparently travis doesn't use root by default --- .travis.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.travis.yml 
b/.travis.yml index 6fc3e61f03..c8dc21c8ed 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,14 +12,14 @@ python: # - cassandra before_install: - - echo "deb http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" > /etc/apt/sources.list.d/cassandra.list - - echo "deb-src http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" >> /etc/apt/sources.list.d/cassandra.list - - gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D - - gpg --export --armor F758CE318D77295D | apt-key add - - - gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00 - - gpg --export --armor 2B5C1B00 | sudo apt-key add - - - apt-get update - - apt-get install -y cassandra + - sudo echo "deb http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" > /etc/apt/sources.list.d/cassandra.list + - sudo echo "deb-src http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" >> /etc/apt/sources.list.d/cassandra.list + - sudo gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D + - sudo gpg --export --armor F758CE318D77295D | apt-key add - + - sudo gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00 + - sudo gpg --export --armor 2B5C1B00 | sudo apt-key add - + - sudo apt-get update + - sudo apt-get install -y cassandra - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /usr/local/cassandra/conf/cassandra-env.sh" - sudo service cassandra start From 5084d9770389752b4e1fdc678dbac15942740641 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 12 Aug 2014 18:31:26 -0700 Subject: [PATCH 0851/3726] apparently travis doesn't like the ubuntu sources.list.d --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index c8dc21c8ed..6475c21b69 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,8 +12,8 @@ python: # - cassandra before_install: - - sudo echo "deb http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" > /etc/apt/sources.list.d/cassandra.list - - sudo echo "deb-src 
http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" >> /etc/apt/sources.list.d/cassandra.list + - sudo echo "deb http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" >> /etc/apt/sources.list + - sudo echo "deb-src http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" >> /etc/apt/sources.list - sudo gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D - sudo gpg --export --armor F758CE318D77295D | apt-key add - - sudo gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00 From 0e6511b68789beefa57a5071d7c51b12cc15c17e Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 12 Aug 2014 18:33:49 -0700 Subject: [PATCH 0852/3726] tee instead of redire --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 6475c21b69..791bc3c917 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,8 +12,8 @@ python: # - cassandra before_install: - - sudo echo "deb http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" >> /etc/apt/sources.list - - sudo echo "deb-src http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" >> /etc/apt/sources.list + - sudo echo "deb http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" | sudo tee -a /etc/apt/sources.list + - sudo echo "deb-src http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" | sudo tee -a /etc/apt/sources.list - sudo gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D - sudo gpg --export --armor F758CE318D77295D | apt-key add - - sudo gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00 From 731be3c246a6f1242e9c67b48f6d2c7a7ce68ea7 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 12 Aug 2014 18:37:08 -0700 Subject: [PATCH 0853/3726] MOAR SUDO --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 791bc3c917..7cb3f7fb38 100644 --- a/.travis.yml +++ b/.travis.yml @@ -15,7 +15,7 @@ before_install: - sudo echo "deb 
http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" | sudo tee -a /etc/apt/sources.list - sudo echo "deb-src http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" | sudo tee -a /etc/apt/sources.list - sudo gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D - - sudo gpg --export --armor F758CE318D77295D | apt-key add - + - sudo gpg --export --armor F758CE318D77295D | sudo apt-key add - - sudo gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00 - sudo gpg --export --armor 2B5C1B00 | sudo apt-key add - - sudo apt-get update From 32600a80753846d2c49b21a20f1ba3dbd8f18325 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 12 Aug 2014 18:51:45 -0700 Subject: [PATCH 0854/3726] remove the existing limits file --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 7cb3f7fb38..478bdb9da3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -19,6 +19,7 @@ before_install: - sudo gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00 - sudo gpg --export --armor 2B5C1B00 | sudo apt-key add - - sudo apt-get update + - sudo rm /etc/security/limits.d/cassandra.conf # travis apparently already has this file - sudo apt-get install -y cassandra - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /usr/local/cassandra/conf/cassandra-env.sh" - sudo service cassandra start From 4c3c20425dd02a236a8ef0a6fc4387b0bc9c073f Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 12 Aug 2014 18:55:32 -0700 Subject: [PATCH 0855/3726] trying to get travis to clean out it's cassandra version --- .travis.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 478bdb9da3..11c1d4beea 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,6 +12,7 @@ python: # - cassandra before_install: + - sudo apt-get remove -y cassandra - sudo echo "deb http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" | sudo tee -a /etc/apt/sources.list - sudo echo "deb-src 
http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" | sudo tee -a /etc/apt/sources.list - sudo gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D @@ -19,7 +20,7 @@ before_install: - sudo gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00 - sudo gpg --export --armor 2B5C1B00 | sudo apt-key add - - sudo apt-get update - - sudo rm /etc/security/limits.d/cassandra.conf # travis apparently already has this file +# - sudo rm /etc/security/limits.d/cassandra.conf # travis apparently already has this file - sudo apt-get install -y cassandra - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /usr/local/cassandra/conf/cassandra-env.sh" - sudo service cassandra start From 7e3c0b57472fdcf877cfbeaa68bb5219b2604894 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 12 Aug 2014 18:58:21 -0700 Subject: [PATCH 0856/3726] testing auto accept conf settings... --- .travis.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 11c1d4beea..5ba9789762 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,7 +12,6 @@ python: # - cassandra before_install: - - sudo apt-get remove -y cassandra - sudo echo "deb http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" | sudo tee -a /etc/apt/sources.list - sudo echo "deb-src http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" | sudo tee -a /etc/apt/sources.list - sudo gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D @@ -21,7 +20,7 @@ before_install: - sudo gpg --export --armor 2B5C1B00 | sudo apt-key add - - sudo apt-get update # - sudo rm /etc/security/limits.d/cassandra.conf # travis apparently already has this file - - sudo apt-get install -y cassandra + - sudo apt-get -o Dpkg::Options::="--force-confnew" install -y cassandra - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /usr/local/cassandra/conf/cassandra-env.sh" - sudo service cassandra start From 
d4aa54d54c8dcb8a1b8b26c72d82087f54c23c18 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 12 Aug 2014 19:04:40 -0700 Subject: [PATCH 0857/3726] lets just go right into the nose tests --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 5ba9789762..d8258a5efb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -29,6 +29,6 @@ install: #- "pip install pytest --use-mirrors" script: - - while [ ! -f /var/run/cassandra.pid ] ; do sleep 1 ; done # wait until cassandra is ready +# - while [ ! -f /var/run/cassandra.pid ] ; do sleep 1 ; done # wait until cassandra is ready, i don't think this is necessary anymore, and it seems to hang - "nosetests --no-skip" # - "py.test cqlengine/tests/" From 6857602675d7dd0e197fdcdd5c59b4d47292c0d9 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 12 Aug 2014 19:27:02 -0700 Subject: [PATCH 0858/3726] trying to fix protocol version issue --- .travis.yml | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index d8258a5efb..d30ea0cf74 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,9 +8,6 @@ python: - "2.7" - "3.4" -#services: # removed to try to get before_install to use custom versions -# - cassandra - before_install: - sudo echo "deb http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" | sudo tee -a /etc/apt/sources.list - sudo echo "deb-src http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" | sudo tee -a /etc/apt/sources.list @@ -19,16 +16,13 @@ before_install: - sudo gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00 - sudo gpg --export --armor 2B5C1B00 | sudo apt-key add - - sudo apt-get update -# - sudo rm /etc/security/limits.d/cassandra.conf # travis apparently already has this file - sudo apt-get -o Dpkg::Options::="--force-confnew" install -y cassandra - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /usr/local/cassandra/conf/cassandra-env.sh" - sudo service cassandra 
start + - if [ "$C_VERSION" -le "20" ]; then export CASSANDRA_PROTOCOL_VERSION=1; fi install: - "pip install -r requirements.txt --use-mirrors" - #- "pip install pytest --use-mirrors" script: -# - while [ ! -f /var/run/cassandra.pid ] ; do sleep 1 ; done # wait until cassandra is ready, i don't think this is necessary anymore, and it seems to hang - "nosetests --no-skip" - # - "py.test cqlengine/tests/" From f30390a3a167d586e4c355c3bb50b5c1f03c4687 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 07:57:59 -0700 Subject: [PATCH 0859/3726] changed the sorted() calls to use assertItemsEqual --- cqlengine/tests/model/test_model_io.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index 1faaaf029a..e19f7ad0b5 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -62,7 +62,8 @@ def test_model_read_as_dict(self): 'a_bool': tm.a_bool, } self.assertEquals(sorted(tm.keys()), sorted(column_dict.keys())) - self.assertEquals(sorted(tm.values()), sorted(column_dict.values())) + + self.assertItemsEqual(tm.values(), column_dict.values()) self.assertEquals( sorted(tm.items(), key=itemgetter(0)), sorted(column_dict.items(), key=itemgetter(0))) From 0dfe9c991a08a951aaefccb90202e8f7c64330d2 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 10:11:36 -0700 Subject: [PATCH 0860/3726] added big int --- docs/topics/models.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index d0e1a05f00..7beda0e40f 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -87,6 +87,7 @@ Column Types * :class:`~cqlengine.columns.Ascii` * :class:`~cqlengine.columns.Text` * :class:`~cqlengine.columns.Integer` + * :class:`~cqlengine.columns.BigInt` * :class:`~cqlengine.columns.DateTime` * :class:`~cqlengine.columns.UUID` * :class:`~cqlengine.columns.TimeUUID` From 
8df170e549604085f6dcdbc6828dc445b47f6a49 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 10:26:39 -0700 Subject: [PATCH 0861/3726] added test runner --- bin/test.py | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100755 bin/test.py diff --git a/bin/test.py b/bin/test.py new file mode 100755 index 0000000000..6a49a0cc4c --- /dev/null +++ b/bin/test.py @@ -0,0 +1,8 @@ +#!/usr/bin/env python + + +import nose + +# setup cassandra + +nose.main() From 260037750b5b791e18d2894077fac6509184b60b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 10:33:42 -0700 Subject: [PATCH 0862/3726] upgrading testing to use bin/test.py, which will let me do some beautiful magic --- .gitignore | 1 - .travis.yml | 10 +++++----- cqlengine/tests/management/test_management.py | 2 +- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/.gitignore b/.gitignore index 1b5006e6b8..314eec67f7 100644 --- a/.gitignore +++ b/.gitignore @@ -7,7 +7,6 @@ dist build eggs parts -bin var sdist develop-eggs diff --git a/.travis.yml b/.travis.yml index d30ea0cf74..9dfeb0fbbd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,16 +1,16 @@ language: python env: - - C_VERSION=12 - - C_VERSION=20 + - CASSANDRA_VERSION=12 + - CASSANDRA_VERSION=20 python: - "2.7" - "3.4" before_install: - - sudo echo "deb http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" | sudo tee -a /etc/apt/sources.list - - sudo echo "deb-src http://www.apache.org/dist/cassandra/debian ${C_VERSION}x main" | sudo tee -a /etc/apt/sources.list + - sudo echo "deb http://www.apache.org/dist/cassandra/debian ${CASSANDRA_VERSION}x main" | sudo tee -a /etc/apt/sources.list + - sudo echo "deb-src http://www.apache.org/dist/cassandra/debian ${CASSANDRA_VERSION}x main" | sudo tee -a /etc/apt/sources.list - sudo gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D - sudo gpg --export --armor F758CE318D77295D | sudo apt-key add - - sudo gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00 @@ -25,4 
+25,4 @@ install: - "pip install -r requirements.txt --use-mirrors" script: - - "nosetests --no-skip" + - "bin/test.py --no-skip" diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 1ed3137f92..03cf16490e 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -124,7 +124,7 @@ class ModelWithTableProperties(Model): __default_time_to_live__ = 4756 __gc_grace_seconds__ = 2063 __index_interval__ = 98706 - __memtable_flush_period_in_ms__ = 43681 + #__memtable_flush_period_in_ms__ = 43681 # not compatible w/ cassandra 1.2 __populate_io_cache_on_flush__ = True __read_repair_chance__ = 0.17985 __replicate_on_write__ = False From 2ad4808c9749229cc59c9aeaeeee83f72c91985e Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 10:50:35 -0700 Subject: [PATCH 0863/3726] moving setup into the bin/test.py --- bin/test.py | 21 ++++++++++++++++++++- cqlengine/tests/base.py | 9 --------- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/bin/test.py b/bin/test.py index 6a49a0cc4c..08a966e31a 100755 --- a/bin/test.py +++ b/bin/test.py @@ -1,8 +1,27 @@ #!/usr/bin/env python - +import os import nose +import sys + +sys.path.append("") # setup cassandra +from cqlengine import connection + +if os.environ.get('CASSANDRA_TEST_HOST'): + CASSANDRA_TEST_HOST = os.environ['CASSANDRA_TEST_HOST'] +else: + CASSANDRA_TEST_HOST = 'localhost' + +protocol_version = int(os.environ.get("CASSANDRA_PROTOCOL_VERSION", 2)) + +connection.setup([CASSANDRA_TEST_HOST], protocol_version=protocol_version, default_keyspace='cqlengine_test') + +try: + c_version = os.environ["CASSANDRA_VERSION"] +except: + print "CASSANDRA_VERSION must be set as an environment variable. 
One of (12, 20, 21)" + raise nose.main() diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index 3250de1471..a9dff94cd4 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -4,15 +4,6 @@ from cqlengine.connection import get_session -if os.environ.get('CASSANDRA_TEST_HOST'): - CASSANDRA_TEST_HOST = os.environ['CASSANDRA_TEST_HOST'] -else: - CASSANDRA_TEST_HOST = 'localhost' - -protocol_version = int(os.environ.get("CASSANDRA_PROTOCOL_VERSION", 2)) - -connection.setup([CASSANDRA_TEST_HOST], protocol_version=protocol_version, default_keyspace='cqlengine_test') - class BaseCassEngTestCase(TestCase): # @classmethod From 30dbc6e4d97f25a358268377d4e1c085fbde18c0 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 13:06:14 -0700 Subject: [PATCH 0864/3726] fixes for different C versions --- cqlengine/tests/base.py | 1 + cqlengine/tests/management/test_management.py | 46 ++++++++++--------- 2 files changed, 26 insertions(+), 21 deletions(-) diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index a9dff94cd4..c195bae9dc 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -3,6 +3,7 @@ import os from cqlengine.connection import get_session +CASSANDRA_VERSION = os.environ['CASSANDRA_VERSION'] class BaseCassEngTestCase(TestCase): diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 03cf16490e..b662032220 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -2,7 +2,7 @@ from cqlengine import ALL, CACHING_ALL, CACHING_NONE from cqlengine.exceptions import CQLEngineException from cqlengine.management import get_fields, sync_table, drop_table -from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.tests.base import BaseCassEngTestCase, CASSANDRA_VERSION from cqlengine import management from cqlengine.tests.query.test_queryset import TestModel from cqlengine.models import Model @@ 
-124,7 +124,6 @@ class ModelWithTableProperties(Model): __default_time_to_live__ = 4756 __gc_grace_seconds__ = 2063 __index_interval__ = 98706 - #__memtable_flush_period_in_ms__ = 43681 # not compatible w/ cassandra 1.2 __populate_io_cache_on_flush__ = True __read_repair_chance__ = 0.17985 __replicate_on_write__ = False @@ -132,6 +131,9 @@ class ModelWithTableProperties(Model): key = columns.UUID(primary_key=True) +# kind of a hack, but we only test this property on C >= 2.0 +if CASSANDRA_VERSION >= 20: + ModelWithTableProperties.__memtable_flush_period_in_ms__ = 43681 class TablePropertiesTests(BaseCassEngTestCase): @@ -139,26 +141,28 @@ def setUp(self): drop_table(ModelWithTableProperties) def test_set_table_properties(self): + sync_table(ModelWithTableProperties) - self.assertDictContainsSubset({ - 'bloom_filter_fp_chance': 0.76328, - 'caching': CACHING_ALL, - 'comment': 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR', - 'default_time_to_live': 4756, - 'gc_grace_seconds': 2063, - 'index_interval': 98706, - 'memtable_flush_period_in_ms': 43681, - 'populate_io_cache_on_flush': True, - 'read_repair_chance': 0.17985, - 'replicate_on_write': False, - # For some reason 'dclocal_read_repair_chance' in CQL is called - # just 'local_read_repair_chance' in the schema table. - # Source: https://issues.apache.org/jira/browse/CASSANDRA-6717 - - # TODO: due to a bug in the native driver i'm not seeing the local read repair chance show up - #'local_read_repair_chance': 0.50811, - - }, management.get_table_settings(ModelWithTableProperties).options) + expected = {'bloom_filter_fp_chance': 0.76328, + 'caching': CACHING_ALL, + 'comment': 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR', + 'default_time_to_live': 4756, + 'gc_grace_seconds': 2063, + 'index_interval': 98706, + 'populate_io_cache_on_flush': True, + 'read_repair_chance': 0.17985, + 'replicate_on_write': False + # For some reason 'dclocal_read_repair_chance' in CQL is called + # just 'local_read_repair_chance' in the schema table. 
+ # Source: https://issues.apache.org/jira/browse/CASSANDRA-6717 + # TODO: due to a bug in the native driver i'm not seeing the local read repair chance show up + # 'local_read_repair_chance': 0.50811, + } + + if CASSANDRA_VERSION >= 20: + expected['memtable_flush_period_in_ms'] = 43681 + + self.assertDictContainsSubset(expected, management.get_table_settings(ModelWithTableProperties).options) def test_table_property_update(self): ModelWithTableProperties.__bloom_filter_fp_chance__ = 0.66778 From 503e2d1ecfa04669a70c1d7b84e8bfc5dc18c4f7 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 13:22:08 -0700 Subject: [PATCH 0865/3726] fixing protocol version --- bin/test.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/bin/test.py b/bin/test.py index 08a966e31a..7cefb356c6 100755 --- a/bin/test.py +++ b/bin/test.py @@ -9,19 +9,23 @@ # setup cassandra from cqlengine import connection +try: + CASSANDRA_VERSION = os.environ["CASSANDRA_VERSION"] +except: + print "CASSANDRA_VERSION must be set as an environment variable. One of (12, 20, 21)" + raise + if os.environ.get('CASSANDRA_TEST_HOST'): CASSANDRA_TEST_HOST = os.environ['CASSANDRA_TEST_HOST'] else: CASSANDRA_TEST_HOST = 'localhost' -protocol_version = int(os.environ.get("CASSANDRA_PROTOCOL_VERSION", 2)) +if CASSANDRA_VERSION < 20: + protocol_version = 1 +else: + protocol_version = 2 connection.setup([CASSANDRA_TEST_HOST], protocol_version=protocol_version, default_keyspace='cqlengine_test') -try: - c_version = os.environ["CASSANDRA_VERSION"] -except: - print "CASSANDRA_VERSION must be set as an environment variable. 
One of (12, 20, 21)" - raise nose.main() From 381d8fbb8843cdb82dde24295ecb19eab96e00b8 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 13:48:25 -0700 Subject: [PATCH 0866/3726] fixing c 1.2 --- bin/test.py | 2 +- cqlengine/tests/base.py | 1 - tox.ini | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/bin/test.py b/bin/test.py index 7cefb356c6..9f4d278c6a 100755 --- a/bin/test.py +++ b/bin/test.py @@ -10,7 +10,7 @@ from cqlengine import connection try: - CASSANDRA_VERSION = os.environ["CASSANDRA_VERSION"] + CASSANDRA_VERSION = int(os.environ["CASSANDRA_VERSION"]) except: print "CASSANDRA_VERSION must be set as an environment variable. One of (12, 20, 21)" raise diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index c195bae9dc..61b5db21ed 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -1,5 +1,4 @@ from unittest import TestCase -from cqlengine import connection import os from cqlengine.connection import get_session diff --git a/tox.ini b/tox.ini index eee198488f..fdac408cc9 100644 --- a/tox.ini +++ b/tox.ini @@ -3,4 +3,4 @@ envlist=py27,py34 [testenv] deps= -rrequirements.txt -commands=nosetests --no-skip +commands=bin/test.py --no-skip From 545275fdb2261a50c3703befaa2e9c9585546215 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 14:09:58 -0700 Subject: [PATCH 0867/3726] fixing environment var --- bin/test.py | 1 - cqlengine/tests/base.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/bin/test.py b/bin/test.py index 9f4d278c6a..fcfa7c6823 100755 --- a/bin/test.py +++ b/bin/test.py @@ -27,5 +27,4 @@ connection.setup([CASSANDRA_TEST_HOST], protocol_version=protocol_version, default_keyspace='cqlengine_test') - nose.main() diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index 61b5db21ed..62aee389c6 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -2,7 +2,7 @@ import os from cqlengine.connection import get_session 
-CASSANDRA_VERSION = os.environ['CASSANDRA_VERSION'] +CASSANDRA_VERSION = int(os.environ['CASSANDRA_VERSION']) class BaseCassEngTestCase(TestCase): From d6743df00a76883d56a91d84cca5d6c50a06349c Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 14:12:33 -0700 Subject: [PATCH 0868/3726] fix for python 3 print --- bin/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/test.py b/bin/test.py index fcfa7c6823..4ac302a29e 100755 --- a/bin/test.py +++ b/bin/test.py @@ -12,7 +12,7 @@ try: CASSANDRA_VERSION = int(os.environ["CASSANDRA_VERSION"]) except: - print "CASSANDRA_VERSION must be set as an environment variable. One of (12, 20, 21)" + print("CASSANDRA_VERSION must be set as an environment variable. One of (12, 20, 21)") raise if os.environ.get('CASSANDRA_TEST_HOST'): From 9411dc7cebe976ad58606cc8d29cd1594c185617 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 14:33:31 -0700 Subject: [PATCH 0869/3726] moved certain management options into block reserved for c > 2.0 --- cqlengine/tests/management/test_management.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index b662032220..923d33b5ac 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -121,9 +121,7 @@ class ModelWithTableProperties(Model): __bloom_filter_fp_chance__ = 0.76328 __caching__ = CACHING_ALL __comment__ = 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR' - __default_time_to_live__ = 4756 __gc_grace_seconds__ = 2063 - __index_interval__ = 98706 __populate_io_cache_on_flush__ = True __read_repair_chance__ = 0.17985 __replicate_on_write__ = False @@ -134,6 +132,8 @@ class ModelWithTableProperties(Model): # kind of a hack, but we only test this property on C >= 2.0 if CASSANDRA_VERSION >= 20: ModelWithTableProperties.__memtable_flush_period_in_ms__ = 43681 + 
ModelWithTableProperties.__index_interval__ = 98706 + ModelWithTableProperties.__default_time_to_live__ = 4756 class TablePropertiesTests(BaseCassEngTestCase): @@ -146,9 +146,7 @@ def test_set_table_properties(self): expected = {'bloom_filter_fp_chance': 0.76328, 'caching': CACHING_ALL, 'comment': 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR', - 'default_time_to_live': 4756, 'gc_grace_seconds': 2063, - 'index_interval': 98706, 'populate_io_cache_on_flush': True, 'read_repair_chance': 0.17985, 'replicate_on_write': False @@ -160,6 +158,8 @@ def test_set_table_properties(self): } if CASSANDRA_VERSION >= 20: + expected['default_time_to_live'] = 4756, + expected['index_interval'] = 98706 expected['memtable_flush_period_in_ms'] = 43681 self.assertDictContainsSubset(expected, management.get_table_settings(ModelWithTableProperties).options) From 733025e5e93ac03068c297e779167ee5c673d10f Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 15:09:53 -0700 Subject: [PATCH 0870/3726] getting ready for python 3 --- cqlengine/columns.py | 14 +++---- cqlengine/tests/management/test_management.py | 41 ++++++++++--------- setup.py | 4 +- 3 files changed, 31 insertions(+), 28 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index e02ed26217..bfe7aabd10 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -423,16 +423,16 @@ def from_datetime(self, dt): clock_seq = None nanoseconds = int(timestamp * 1e9) - timestamp = int(nanoseconds // 100) + 0x01b21dd213814000L + timestamp = int(nanoseconds // 100) + 0x01b21dd213814000 if clock_seq is None: import random - clock_seq = random.randrange(1 << 14L) # instead of stable storage - time_low = timestamp & 0xffffffffL - time_mid = (timestamp >> 32L) & 0xffffL - time_hi_version = (timestamp >> 48L) & 0x0fffL - clock_seq_low = clock_seq & 0xffL - clock_seq_hi_variant = (clock_seq >> 8L) & 0x3fL + clock_seq = random.randrange(1 << 14) # instead of stable storage + time_low = timestamp & 0xffffffff + 
time_mid = (timestamp >> 32) & 0xffff + time_hi_version = (timestamp >> 48) & 0x0fff + clock_seq_low = clock_seq & 0xff + clock_seq_hi_variant = (clock_seq >> 8) & 0x3f if node is None: node = getnode() return pyUUID(fields=(time_low, time_mid, time_hi_version, diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 923d33b5ac..4c586eac4d 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -158,7 +158,7 @@ def test_set_table_properties(self): } if CASSANDRA_VERSION >= 20: - expected['default_time_to_live'] = 4756, + expected['default_time_to_live'] = 4756 expected['index_interval'] = 98706 expected['memtable_flush_period_in_ms'] = 43681 @@ -168,34 +168,37 @@ def test_table_property_update(self): ModelWithTableProperties.__bloom_filter_fp_chance__ = 0.66778 ModelWithTableProperties.__caching__ = CACHING_NONE ModelWithTableProperties.__comment__ = 'xirAkRWZVVvsmzRvXamiEcQkshkUIDINVJZgLYSdnGHweiBrAiJdLJkVohdRy' - ModelWithTableProperties.__default_time_to_live__ = 65178 ModelWithTableProperties.__gc_grace_seconds__ = 96362 - ModelWithTableProperties.__index_interval__ = 94207 - ModelWithTableProperties.__memtable_flush_period_in_ms__ = 60210 + ModelWithTableProperties.__populate_io_cache_on_flush__ = False ModelWithTableProperties.__read_repair_chance__ = 0.2989 ModelWithTableProperties.__replicate_on_write__ = True ModelWithTableProperties.__dclocal_read_repair_chance__ = 0.12732 + if CASSANDRA_VERSION >= 20: + ModelWithTableProperties.__default_time_to_live__ = 65178 + ModelWithTableProperties.__memtable_flush_period_in_ms__ = 60210 + ModelWithTableProperties.__index_interval__ = 94207 + sync_table(ModelWithTableProperties) table_settings = management.get_table_settings(ModelWithTableProperties).options - self.assertDictContainsSubset({ - 'bloom_filter_fp_chance': 0.66778, - 'caching': CACHING_NONE, - 'comment': 
'xirAkRWZVVvsmzRvXamiEcQkshkUIDINVJZgLYSdnGHweiBrAiJdLJkVohdRy', - 'default_time_to_live': 65178, - 'gc_grace_seconds': 96362, - 'index_interval': 94207, - 'memtable_flush_period_in_ms': 60210, - 'populate_io_cache_on_flush': False, - 'read_repair_chance': 0.2989, - 'replicate_on_write': True, - - # TODO see above comment re: native driver missing local read repair chance - # 'local_read_repair_chance': 0.12732, - }, table_settings) + expected = {'bloom_filter_fp_chance': 0.66778, + 'caching': CACHING_NONE, + 'comment': 'xirAkRWZVVvsmzRvXamiEcQkshkUIDINVJZgLYSdnGHweiBrAiJdLJkVohdRy', + 'gc_grace_seconds': 96362, + 'populate_io_cache_on_flush': False, + 'read_repair_chance': 0.2989, + 'replicate_on_write': True # TODO see above comment re: native driver missing local read repair chance + # 'local_read_repair_chance': 0.12732, + } + if CASSANDRA_VERSION >= 20: + expected['memtable_flush_period_in_ms'] = 60210 + expected['default_time_to_live'] = 65178 + expected['index_interval'] = 94207 + + self.assertDictContainsSubset(expected, table_settings) class SyncTableTests(BaseCassEngTestCase): diff --git a/setup.py b/setup.py index afb7e12611..cf35583c32 100644 --- a/setup.py +++ b/setup.py @@ -31,9 +31,9 @@ "Topic :: Software Development :: Libraries :: Python Modules", ], keywords='cassandra,cql,orm', - install_requires = ['cassandra-driver >= 2.1.0'], + install_requires = ['cassandra-driver >= 2.1.0', 'six >= 1.7.2'], author='Blake Eggleston, Jon Haddad', - author_email='bdeggleston@gmail.com', + author_email='bdeggleston@gmail.com, jon@jonhaddad.com', url='https://github.com/cqlengine/cqlengine', license='BSD', packages=find_packages(), From 666a4570ae56ff479f2e40a8008b06f011a86abd Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 15:46:52 -0700 Subject: [PATCH 0871/3726] introduced six as well as using the python3 version of sorted() which no longer takes a cmp function --- cqlengine/columns.py | 1 + cqlengine/models.py | 8 ++++++-- 2 files changed, 7 
insertions(+), 2 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index bfe7aabd10..b0bddc8719 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -7,6 +7,7 @@ from cqlengine.exceptions import ValidationError from cassandra.encoder import cql_quote +import six class BaseValueManager(object): diff --git a/cqlengine/models.py b/cqlengine/models.py index 09612b2e7b..e34764ec96 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -631,7 +631,8 @@ def _transform_column(col_name, col_obj): attrs[col_name] = ColumnDescriptor(col_obj) column_definitions = [(k,v) for k,v in attrs.items() if isinstance(v, columns.Column)] - column_definitions = sorted(column_definitions, lambda x,y: cmp(x[1].position, y[1].position)) + #column_definitions = sorted(column_definitions, lambda x,y: cmp(x[1].position, y[1].position)) + column_definitions = sorted(column_definitions, key=lambda x: x[1].position) is_polymorphic_base = any([c[1].polymorphic_key for c in column_definitions]) @@ -765,12 +766,15 @@ def _get_polymorphic_base(bases): return klass +import six + +@six.add_metaclass(ModelMetaClass) class Model(BaseModel): """ the db name for the column family can be set as the attribute db_name, or it will be genertaed from the class name """ __abstract__ = True - __metaclass__ = ModelMetaClass + # __metaclass__ = ModelMetaClass From 6f2d013e0254095d316de6bb59222475977e190c Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 15:55:07 -0700 Subject: [PATCH 0872/3726] dealing with itemsview --- cqlengine/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index e34764ec96..7b9270629c 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -636,7 +636,7 @@ def _transform_column(col_name, col_obj): is_polymorphic_base = any([c[1].polymorphic_key for c in column_definitions]) - column_definitions = inherited_columns.items() + column_definitions + column_definitions = [x 
for x in inherited_columns.items()] + column_definitions polymorphic_columns = [c for c in column_definitions if c[1].polymorphic_key] is_polymorphic = len(polymorphic_columns) > 0 From 284e9cf2b0a1268c1a4a283093d87edcf745e704 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 16:10:57 -0700 Subject: [PATCH 0873/3726] using the six basestring --- cqlengine/columns.py | 2 +- cqlengine/connection.py | 3 ++- cqlengine/models.py | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index b0bddc8719..6e4ced3c50 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -240,7 +240,7 @@ def __init__(self, *args, **kwargs): def validate(self, value): value = super(Text, self).validate(value) if value is None: return - if not isinstance(value, (basestring, bytearray)) and value is not None: + if not isinstance(value, (six.string_types, bytearray)) and value is not None: raise ValidationError('{} is not a string'.format(type(value))) if self.max_length: if len(value) > self.max_length: diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 4a6a426cef..444400d5b0 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -5,6 +5,7 @@ from collections import namedtuple from cassandra.cluster import Cluster, NoHostAvailable from cassandra.query import SimpleStatement, Statement +import six try: import Queue as queue @@ -102,7 +103,7 @@ def execute(query, params=None, consistency_level=None): query = str(query) query = SimpleStatement(query, consistency_level=consistency_level) - elif isinstance(query, basestring): + elif isinstance(query, six.string_types): query = SimpleStatement(query, consistency_level=consistency_level) LOG.info(query.query_string) diff --git a/cqlengine/models.py b/cqlengine/models.py index 7b9270629c..b4a2feb1c3 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -698,7 +698,7 @@ def _get_polymorphic_base(bases): if not is_abstract: raise 
ModelException("at least one partition key must be defined") if len(partition_keys) == 1: - pk_name = partition_keys.keys()[0] + pk_name = [x for x in partition_keys.keys()][0] attrs['pk'] = attrs[pk_name] else: # composite partition key case, get/set a tuple of values From 5c67bc8cb99b111a63a82922221599fbf8f9c7c6 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 16:49:48 -0700 Subject: [PATCH 0874/3726] removing the unicode() calls in favor of the six library --- cqlengine/statements.py | 42 +++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 549b529df7..c9112de117 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -1,5 +1,6 @@ import time from datetime import datetime, timedelta +import six from cqlengine.functions import QueryValue from cqlengine.operators import BaseWhereOperator, InOperator @@ -7,7 +8,16 @@ class StatementException(Exception): pass -class ValueQuoter(object): + +import sys + +class UnicodeMixin(object): + if sys.version_info > (3, 0): + __str__ = lambda x: x.__unicode__() + else: + __str__ = lambda x: six.text_type(x).encode('utf-8') + +class ValueQuoter(UnicodeMixin): def __init__(self, value): self.value = value @@ -29,9 +39,6 @@ def __eq__(self, other): return self.value == other.value return False - def __str__(self): - return unicode(self).encode('utf-8') - class InQuoter(ValueQuoter): @@ -40,7 +47,7 @@ def __unicode__(self): return '(' + ', '.join([cql_quote(v) for v in self.value]) + ')' -class BaseClause(object): +class BaseClause(UnicodeMixin): def __init__(self, field, value): self.field = field @@ -50,9 +57,6 @@ def __init__(self, field, value): def __unicode__(self): raise NotImplementedError - def __str__(self): - return unicode(self).encode('utf-8') - def __hash__(self): return hash(self.field) ^ hash(self.value) @@ -101,7 +105,7 @@ def __init__(self, field, operator, value, 
quote_field=True): def __unicode__(self): field = ('"{}"' if self.quote_field else '{}').format(self.field) - return u'{} {} {}'.format(field, self.operator, unicode(self.query_value)) + return u'{} {} {}'.format(field, self.operator, six.text_type(self.query_value)) def __hash__(self): return super(WhereClause, self).__hash__() ^ hash(self.operator) @@ -430,7 +434,7 @@ def __unicode__(self): return ', '.join(['"{}"[%({})s]'.format(self.field, self.context_id + i) for i in range(len(self._removals))]) -class BaseCQLStatement(object): +class BaseCQLStatement(UnicodeMixin): """ The base cql statement class """ def __init__(self, table, consistency=None, timestamp=None, where=None): @@ -486,7 +490,7 @@ def timestamp_normalized(self): if not self.timestamp: return None - if isinstance(self.timestamp, (int, long)): + if isinstance(self.timestamp, six.integer_types): return self.timestamp if isinstance(self.timestamp, timedelta): @@ -499,15 +503,13 @@ def timestamp_normalized(self): def __unicode__(self): raise NotImplementedError - def __str__(self): - return unicode(self).encode('utf-8') def __repr__(self): return self.__unicode__() @property def _where(self): - return 'WHERE {}'.format(' AND '.join([unicode(c) for c in self.where_clauses])) + return 'WHERE {}'.format(' AND '.join([six.text_type(c) for c in self.where_clauses])) class SelectStatement(BaseCQLStatement): @@ -533,9 +535,9 @@ def __init__(self, where=where ) - self.fields = [fields] if isinstance(fields, basestring) else (fields or []) + self.fields = [fields] if isinstance(fields, six.string_types) else (fields or []) self.count = count - self.order_by = [order_by] if isinstance(order_by, basestring) else order_by + self.order_by = [order_by] if isinstance(order_by, six.string_types) else order_by self.limit = limit self.allow_filtering = allow_filtering @@ -551,7 +553,7 @@ def __unicode__(self): qs += [self._where] if self.order_by and not self.count: - qs += ['ORDER BY {}'.format(', '.join(unicode(o) 
for o in self.order_by))] + qs += ['ORDER BY {}'.format(', '.join(six.text_type(o) for o in self.order_by))] if self.limit: qs += ['LIMIT {}'.format(self.limit)] @@ -658,7 +660,7 @@ def __unicode__(self): qs += ["USING {}".format(" AND ".join(using_options))] qs += ['SET'] - qs += [', '.join([unicode(c) for c in self.assignments])] + qs += [', '.join([six.text_type(c) for c in self.assignments])] if self.where_clauses: qs += [self._where] @@ -677,7 +679,7 @@ def __init__(self, table, fields=None, consistency=None, where=None, timestamp=N timestamp=timestamp ) self.fields = [] - if isinstance(fields, basestring): + if isinstance(fields, six.string_types): fields = [fields] for field in fields or []: self.add_field(field) @@ -695,7 +697,7 @@ def get_context(self): return ctx def add_field(self, field): - if isinstance(field, basestring): + if isinstance(field, six.string_types): field = FieldDeleteClause(field) if not isinstance(field, BaseClause): raise StatementException("only instances of AssignmentClause can be added to statements") From e2be28246b9d91f0ce17ba15a16e6e76f6eeef7d Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 17:06:17 -0700 Subject: [PATCH 0875/3726] removing more __str__ in favor of the mixin --- cqlengine/columns.py | 9 +++++++++ cqlengine/operators.py | 19 ++++++++++++++----- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 6e4ced3c50..0ddc5b54a3 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -9,6 +9,15 @@ from cassandra.encoder import cql_quote import six +import sys + +# move to central spot +class UnicodeMixin(object): + if sys.version_info > (3, 0): + __str__ = lambda x: x.__unicode__() + else: + __str__ = lambda x: six.text_type(x).encode('utf-8') + class BaseValueManager(object): diff --git a/cqlengine/operators.py b/cqlengine/operators.py index 4fef0a6b5d..4776356623 100644 --- a/cqlengine/operators.py +++ b/cqlengine/operators.py @@ -1,7 
+1,19 @@ +import six + + class QueryOperatorException(Exception): pass +import sys + +# move to central spot +class UnicodeMixin(object): + if sys.version_info > (3, 0): + __str__ = lambda x: x.__unicode__() + else: + __str__ = lambda x: six.text_type(x).encode('utf-8') -class BaseQueryOperator(object): + +class BaseQueryOperator(UnicodeMixin): # The symbol that identifies this operator in kwargs # ie: colname__ symbol = None @@ -14,9 +26,6 @@ def __unicode__(self): raise QueryOperatorException("cql symbol is None") return self.cql_symbol - def __str__(self): - return unicode(self).encode('utf-8') - @classmethod def get_operator(cls, symbol): if cls == BaseQueryOperator: @@ -79,4 +88,4 @@ class AssignmentOperator(BaseAssignmentOperator): class AddSymbol(BaseAssignmentOperator): - cql_symbol = "+" \ No newline at end of file + cql_symbol = "+" From 1093ac4890f2753e4f7f19afd680eb3043bb19ef Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 17:19:33 -0700 Subject: [PATCH 0876/3726] no need for long() --- cqlengine/columns.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 0ddc5b54a3..7d7cfb607f 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -267,7 +267,7 @@ def validate(self, value): val = super(Integer, self).validate(value) if val is None: return try: - return long(val) + return int(val) except (TypeError, ValueError): raise ValidationError("{} can't be converted to integral value".format(value)) @@ -290,7 +290,7 @@ def validate(self, value): if val is None: return try: - return long(val) + return int(val) except (TypeError, ValueError): raise ValidationError( "{} can't be converted to integral value".format(value)) @@ -353,7 +353,7 @@ def to_database(self, value): epoch = datetime(1970, 1, 1, tzinfo=value.tzinfo) offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 - return long(((value - epoch).total_seconds() - offset) * 1000) + 
return int(((value - epoch).total_seconds() - offset) * 1000) class Date(Column): @@ -378,7 +378,7 @@ def to_database(self, value): if not isinstance(value, date): raise ValidationError("'{}' is not a date object".format(repr(value))) - return long((value - date(1970, 1, 1)).total_seconds() * 1000) + return int((value - date(1970, 1, 1)).total_seconds() * 1000) class UUID(Column): From e64a7de36d665d0b673813726c11eb8c50338f9a Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 17:38:49 -0700 Subject: [PATCH 0877/3726] fixed arg count --- cqlengine/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 0f039f9365..819c7870df 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -405,7 +405,7 @@ def filter(self, *args, **kwargs): :rtype: AbstractQuerySet """ #add arguments to the where clause filters - if kwargs.values().count(None): + if len([x for x in kwargs.values() if x is None]): raise CQLEngineException("None values on filter are not allowed") clone = copy.deepcopy(self) From 9486baa300bcf2c3009223edbd253074029b66c4 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 17:55:42 -0700 Subject: [PATCH 0878/3726] fixed a whole bunch of unicode references, fixed incorrect encoding of clauses --- cqlengine/functions.py | 12 ++++++++++-- cqlengine/query.py | 1 + .../tests/operators/test_where_operators.py | 13 +++++++------ .../tests/statements/test_delete_statement.py | 12 ++++++------ .../tests/statements/test_insert_statement.py | 7 ++++--- .../tests/statements/test_select_statement.py | 18 +++++++++--------- .../tests/statements/test_update_statement.py | 10 +++++----- .../tests/statements/test_where_clause.py | 3 ++- 8 files changed, 44 insertions(+), 32 deletions(-) diff --git a/cqlengine/functions.py b/cqlengine/functions.py index b5f6a5af71..004c66e5c0 100644 --- a/cqlengine/functions.py +++ b/cqlengine/functions.py @@ -1,9 +1,17 @@ from datetime import datetime 
from uuid import uuid1 - +import sys +import six from cqlengine.exceptions import ValidationError +# move to central spot + +class UnicodeMixin(object): + if sys.version_info > (3, 0): + __str__ = lambda x: x.__unicode__() + else: + __str__ = lambda x: six.text_type(x).encode('utf-8') -class QueryValue(object): +class QueryValue(UnicodeMixin): """ Base class for query filter values. Subclasses of these classes can be passed into .filter() keyword args diff --git a/cqlengine/query.py b/cqlengine/query.py index 819c7870df..385f922bef 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -20,6 +20,7 @@ class QueryException(CQLEngineException): pass class DoesNotExist(QueryException): pass class MultipleObjectsReturned(QueryException): pass +import six class AbstractQueryableColumn(object): """ diff --git a/cqlengine/tests/operators/test_where_operators.py b/cqlengine/tests/operators/test_where_operators.py index f8f0e8fad8..a73454c660 100644 --- a/cqlengine/tests/operators/test_where_operators.py +++ b/cqlengine/tests/operators/test_where_operators.py @@ -1,6 +1,7 @@ from unittest import TestCase from cqlengine.operators import * +import six class TestWhereOperators(TestCase): @@ -20,11 +21,11 @@ def check_lookup(symbol, expected): def test_operator_rendering(self): """ tests symbols are rendered properly """ - self.assertEqual("=", unicode(EqualsOperator())) - self.assertEqual("IN", unicode(InOperator())) - self.assertEqual(">", unicode(GreaterThanOperator())) - self.assertEqual(">=", unicode(GreaterThanOrEqualOperator())) - self.assertEqual("<", unicode(LessThanOperator())) - self.assertEqual("<=", unicode(LessThanOrEqualOperator())) + self.assertEqual("=", six.text_type(EqualsOperator())) + self.assertEqual("IN", six.text_type(InOperator())) + self.assertEqual(">", six.text_type(GreaterThanOperator())) + self.assertEqual(">=", six.text_type(GreaterThanOrEqualOperator())) + self.assertEqual("<", six.text_type(LessThanOperator())) + self.assertEqual("<=", 
six.text_type(LessThanOrEqualOperator())) diff --git a/cqlengine/tests/statements/test_delete_statement.py b/cqlengine/tests/statements/test_delete_statement.py index f3b0df9ffd..e658784f71 100644 --- a/cqlengine/tests/statements/test_delete_statement.py +++ b/cqlengine/tests/statements/test_delete_statement.py @@ -1,7 +1,7 @@ from unittest import TestCase from cqlengine.statements import DeleteStatement, WhereClause, MapDeleteClause from cqlengine.operators import * - +import six class DeleteStatementTests(TestCase): @@ -14,24 +14,24 @@ def test_single_field_is_listified(self): def test_field_rendering(self): """ tests that fields are properly added to the select statement """ ds = DeleteStatement('table', ['f1', 'f2']) - self.assertTrue(unicode(ds).startswith('DELETE "f1", "f2"'), unicode(ds)) + self.assertTrue(six.text_type(ds).startswith('DELETE "f1", "f2"'), six.text_type(ds)) self.assertTrue(str(ds).startswith('DELETE "f1", "f2"'), str(ds)) def test_none_fields_rendering(self): """ tests that a '*' is added if no fields are passed in """ ds = DeleteStatement('table', None) - self.assertTrue(unicode(ds).startswith('DELETE FROM'), unicode(ds)) + self.assertTrue(six.text_type(ds).startswith('DELETE FROM'), six.text_type(ds)) self.assertTrue(str(ds).startswith('DELETE FROM'), str(ds)) def test_table_rendering(self): ds = DeleteStatement('table', None) - self.assertTrue(unicode(ds).startswith('DELETE FROM table'), unicode(ds)) + self.assertTrue(six.text_type(ds).startswith('DELETE FROM table'), six.text_type(ds)) self.assertTrue(str(ds).startswith('DELETE FROM table'), str(ds)) def test_where_clause_rendering(self): ds = DeleteStatement('table', None) ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) - self.assertEqual(unicode(ds), 'DELETE FROM table WHERE "a" = %(0)s', unicode(ds)) + self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "a" = %(0)s', six.text_type(ds)) def test_context_update(self): ds = DeleteStatement('table', None) @@ -39,7 
+39,7 @@ def test_context_update(self): ds.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) ds.update_context_id(7) - self.assertEqual(unicode(ds), 'DELETE "d"[%(8)s] FROM table WHERE "a" = %(7)s') + self.assertEqual(six.text_type(ds), 'DELETE "d"[%(8)s] FROM table WHERE "a" = %(7)s') self.assertEqual(ds.get_context(), {'7': 'b', '8': 3}) def test_context(self): diff --git a/cqlengine/tests/statements/test_insert_statement.py b/cqlengine/tests/statements/test_insert_statement.py index 32bb74350d..7a165cbfe8 100644 --- a/cqlengine/tests/statements/test_insert_statement.py +++ b/cqlengine/tests/statements/test_insert_statement.py @@ -1,6 +1,7 @@ from unittest import TestCase from cqlengine.statements import InsertStatement, StatementException, AssignmentClause +import six class InsertStatementTests(TestCase): @@ -16,7 +17,7 @@ def test_statement(self): ist.add_assignment_clause(AssignmentClause('c', 'd')) self.assertEqual( - unicode(ist), + six.text_type(ist), 'INSERT INTO table ("a", "c") VALUES (%(0)s, %(1)s)' ) @@ -27,7 +28,7 @@ def test_context_update(self): ist.update_context_id(4) self.assertEqual( - unicode(ist), + six.text_type(ist), 'INSERT INTO table ("a", "c") VALUES (%(4)s, %(5)s)' ) ctx = ist.get_context() @@ -37,4 +38,4 @@ def test_additional_rendering(self): ist = InsertStatement('table', ttl=60) ist.add_assignment_clause(AssignmentClause('a', 'b')) ist.add_assignment_clause(AssignmentClause('c', 'd')) - self.assertIn('USING TTL 60', unicode(ist)) + self.assertIn('USING TTL 60', six.text_type(ist)) diff --git a/cqlengine/tests/statements/test_select_statement.py b/cqlengine/tests/statements/test_select_statement.py index 9c466485a5..62d6ce69d9 100644 --- a/cqlengine/tests/statements/test_select_statement.py +++ b/cqlengine/tests/statements/test_select_statement.py @@ -1,7 +1,7 @@ from unittest import TestCase from cqlengine.statements import SelectStatement, WhereClause from cqlengine.operators import * - +import six class 
SelectStatementTests(TestCase): @@ -13,31 +13,31 @@ def test_single_field_is_listified(self): def test_field_rendering(self): """ tests that fields are properly added to the select statement """ ss = SelectStatement('table', ['f1', 'f2']) - self.assertTrue(unicode(ss).startswith('SELECT "f1", "f2"'), unicode(ss)) + self.assertTrue(six.text_type(ss).startswith('SELECT "f1", "f2"'), six.text_type(ss)) self.assertTrue(str(ss).startswith('SELECT "f1", "f2"'), str(ss)) def test_none_fields_rendering(self): """ tests that a '*' is added if no fields are passed in """ ss = SelectStatement('table') - self.assertTrue(unicode(ss).startswith('SELECT *'), unicode(ss)) + self.assertTrue(six.text_type(ss).startswith('SELECT *'), six.text_type(ss)) self.assertTrue(str(ss).startswith('SELECT *'), str(ss)) def test_table_rendering(self): ss = SelectStatement('table') - self.assertTrue(unicode(ss).startswith('SELECT * FROM table'), unicode(ss)) + self.assertTrue(six.text_type(ss).startswith('SELECT * FROM table'), six.text_type(ss)) self.assertTrue(str(ss).startswith('SELECT * FROM table'), str(ss)) def test_where_clause_rendering(self): ss = SelectStatement('table') ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) - self.assertEqual(unicode(ss), 'SELECT * FROM table WHERE "a" = %(0)s', unicode(ss)) + self.assertEqual(six.text_type(ss), 'SELECT * FROM table WHERE "a" = %(0)s', six.text_type(ss)) def test_count(self): ss = SelectStatement('table', count=True, limit=10, order_by='d') ss.add_where_clause(WhereClause('a', EqualsOperator(), 'b')) - self.assertEqual(unicode(ss), 'SELECT COUNT(*) FROM table WHERE "a" = %(0)s LIMIT 10', unicode(ss)) - self.assertIn('LIMIT', unicode(ss)) - self.assertNotIn('ORDER', unicode(ss)) + self.assertEqual(six.text_type(ss), 'SELECT COUNT(*) FROM table WHERE "a" = %(0)s LIMIT 10', six.text_type(ss)) + self.assertIn('LIMIT', six.text_type(ss)) + self.assertNotIn('ORDER', six.text_type(ss)) def test_context(self): ss = 
SelectStatement('table') @@ -63,7 +63,7 @@ def test_additional_rendering(self): limit=15, allow_filtering=True ) - qstr = unicode(ss) + qstr = six.text_type(ss) self.assertIn('LIMIT 15', qstr) self.assertIn('ORDER BY x, y', qstr) self.assertIn('ALLOW FILTERING', qstr) diff --git a/cqlengine/tests/statements/test_update_statement.py b/cqlengine/tests/statements/test_update_statement.py index fae69bbd8e..f75a953008 100644 --- a/cqlengine/tests/statements/test_update_statement.py +++ b/cqlengine/tests/statements/test_update_statement.py @@ -1,14 +1,14 @@ from unittest import TestCase from cqlengine.statements import UpdateStatement, WhereClause, AssignmentClause from cqlengine.operators import * - +import six class UpdateStatementTests(TestCase): def test_table_rendering(self): """ tests that fields are properly added to the select statement """ us = UpdateStatement('table') - self.assertTrue(unicode(us).startswith('UPDATE table SET'), unicode(us)) + self.assertTrue(six.text_type(us).startswith('UPDATE table SET'), six.text_type(us)) self.assertTrue(str(us).startswith('UPDATE table SET'), str(us)) def test_rendering(self): @@ -16,7 +16,7 @@ def test_rendering(self): us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) - self.assertEqual(unicode(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s', unicode(us)) + self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s', six.text_type(us)) def test_context(self): us = UpdateStatement('table') @@ -31,12 +31,12 @@ def test_context_update(self): us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) us.update_context_id(3) - self.assertEqual(unicode(us), 'UPDATE table SET "a" = %(4)s, "c" = %(5)s WHERE "a" = %(3)s') + self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(4)s, "c" = %(5)s 
WHERE "a" = %(3)s') self.assertEqual(us.get_context(), {'4': 'b', '5': 'd', '3': 'x'}) def test_additional_rendering(self): us = UpdateStatement('table', ttl=60) us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) - self.assertIn('USING TTL 60', unicode(us)) + self.assertIn('USING TTL 60', six.text_type(us)) diff --git a/cqlengine/tests/statements/test_where_clause.py b/cqlengine/tests/statements/test_where_clause.py index be81b63e6c..cd3bcd6bd5 100644 --- a/cqlengine/tests/statements/test_where_clause.py +++ b/cqlengine/tests/statements/test_where_clause.py @@ -1,4 +1,5 @@ from unittest import TestCase +import six from cqlengine.operators import EqualsOperator from cqlengine.statements import StatementException, WhereClause @@ -15,7 +16,7 @@ def test_where_clause_rendering(self): wc = WhereClause('a', EqualsOperator(), 'c') wc.set_context_id(5) - self.assertEqual('"a" = %(5)s', unicode(wc), unicode(wc)) + self.assertEqual('"a" = %(5)s', six.text_type(wc), six.text_type(wc)) self.assertEqual('"a" = %(5)s', str(wc), type(wc)) def test_equality_method(self): From 2010128d73cd664bd460ed46cfe95c806f711595 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 13 Aug 2014 18:04:42 -0700 Subject: [PATCH 0879/3726] fixed long type checking --- cqlengine/query.py | 8 ++++---- cqlengine/statements.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 385f922bef..2fa5933a73 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -150,13 +150,13 @@ def execute(self): opener = 'BEGIN ' + (self.batch_type + ' ' if self.batch_type else '') + ' BATCH' if self.timestamp: - if isinstance(self.timestamp, (int, long)): + if isinstance(self.timestamp, six.integer_types): ts = self.timestamp elif isinstance(self.timestamp, (datetime, timedelta)): ts = self.timestamp if isinstance(self.timestamp, timedelta): ts += datetime.now() # Apply timedelta - ts = 
long(time.mktime(ts.timetuple()) * 1e+6 + ts.microsecond) + ts = int(time.mktime(ts.timetuple()) * 1e+6 + ts.microsecond) else: raise ValueError("Batch expects a long, a timedelta, or a datetime") @@ -340,7 +340,7 @@ def __getitem__(self, s): return self._result_cache[s.start:s.stop:s.step] else: #return the object at this index - s = long(s) + s = int(s) #handle negative indexing if s < 0: s += num_results @@ -520,7 +520,7 @@ def limit(self, v): Sets the limit on the number of results returned CQL has a default limit of 10,000 """ - if not (v is None or isinstance(v, (int, long))): + if not (v is None or isinstance(v, six.integer_types)): raise TypeError if v == self._limit: return self diff --git a/cqlengine/statements.py b/cqlengine/statements.py index c9112de117..f1681a1658 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -498,7 +498,7 @@ def timestamp_normalized(self): else: tmp = self.timestamp - return long(time.mktime(tmp.timetuple()) * 1e+6 + tmp.microsecond) + return int(time.mktime(tmp.timetuple()) * 1e+6 + tmp.microsecond) def __unicode__(self): raise NotImplementedError From 6093ec425c73e6623331189b84a53bc2ea384361 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 10:30:50 -0700 Subject: [PATCH 0880/3726] fixed a bunch of unicode issues --- cqlengine/query.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 2fa5933a73..cb1d9241a1 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -7,7 +7,7 @@ from cqlengine.connection import execute from cqlengine.exceptions import CQLEngineException, ValidationError -from cqlengine.functions import Token, BaseQueryFunction, QueryValue +from cqlengine.functions import Token, BaseQueryFunction, QueryValue, UnicodeMixin #CQL 3 reference: #http://www.datastax.com/docs/1.1/references/cql/index @@ -22,7 +22,7 @@ class MultipleObjectsReturned(QueryException): pass import six -class 
AbstractQueryableColumn(object): +class AbstractQueryableColumn(UnicodeMixin): """ exposes cql query operators through pythons builtin comparator symbols @@ -34,9 +34,6 @@ def _get_column(self): def __unicode__(self): raise NotImplementedError - def __str__(self): - return str(unicode(self)) - def _to_database(self, val): if isinstance(val, QueryValue): return val @@ -49,22 +46,22 @@ def in_(self, item): used where you'd typically want to use python's `in` operator """ - return WhereClause(unicode(self), InOperator(), item) + return WhereClause(six.text_type(self), InOperator(), item) def __eq__(self, other): - return WhereClause(unicode(self), EqualsOperator(), self._to_database(other)) + return WhereClause(six.text_type(self), EqualsOperator(), self._to_database(other)) def __gt__(self, other): - return WhereClause(unicode(self), GreaterThanOperator(), self._to_database(other)) + return WhereClause(six.text_type(self), GreaterThanOperator(), self._to_database(other)) def __ge__(self, other): - return WhereClause(unicode(self), GreaterThanOrEqualOperator(), self._to_database(other)) + return WhereClause(six.text_type(self), GreaterThanOrEqualOperator(), self._to_database(other)) def __lt__(self, other): - return WhereClause(unicode(self), LessThanOperator(), self._to_database(other)) + return WhereClause(six.text_type(self), LessThanOperator(), self._to_database(other)) def __le__(self, other): - return WhereClause(unicode(self), LessThanOrEqualOperator(), self._to_database(other)) + return WhereClause(six.text_type(self), LessThanOrEqualOperator(), self._to_database(other)) class BatchType(object): @@ -236,7 +233,7 @@ def _execute(self, q): return result def __unicode__(self): - return unicode(self._select_query()) + return six.text_type(self._select_query()) def __str__(self): return str(self.__unicode__()) From edad4342b0a5482c1e573c4f2783d8df33962d30 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 11:19:39 -0700 Subject: [PATCH 0881/3726] six 
transitions --- cqlengine/columns.py | 2 +- cqlengine/management.py | 3 ++- cqlengine/models.py | 6 +++--- cqlengine/query.py | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 7d7cfb607f..7a934830c9 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -394,7 +394,7 @@ def validate(self, value): if val is None: return from uuid import UUID as _UUID if isinstance(val, _UUID): return val - if isinstance(val, basestring) and self.re_uuid.match(val): + if isinstance(val, six.string_types) and self.re_uuid.match(val): return _UUID(val) raise ValidationError("{} is not a valid uuid".format(value)) diff --git a/cqlengine/management.py b/cqlengine/management.py index 095e41b4c5..502038ea32 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -1,5 +1,6 @@ import json import warnings +import six from cqlengine import SizeTieredCompactionStrategy, LeveledCompactionStrategy from cqlengine import ONE from cqlengine.named import NamedTable @@ -166,7 +167,7 @@ def add_column(col): prop_value = getattr(model, '__{}__'.format(prop_name), None) if prop_value is not None: # Strings needs to be single quoted - if isinstance(prop_value, basestring): + if isinstance(prop_value, six.string_types): prop_value = "'{}'".format(prop_value) with_qs.append("{} = {}".format(prop_name, prop_value)) diff --git a/cqlengine/models.py b/cqlengine/models.py index b4a2feb1c3..cbb1485dea 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -299,7 +299,7 @@ def __repr__(self): Pretty printing of models by their primary key """ return '{} <{}>'.format(self.__class__.__name__, - ', '.join(('{}={}'.format(k, getattr(self, k)) for k,v in self._primary_keys.iteritems())) + ', '.join(('{}={}'.format(k, getattr(self, k)) for k,v in six.iteritems(self._primary_keys))) ) @@ -451,7 +451,7 @@ def __iter__(self): def __getitem__(self, key): """ Returns column's value. 
""" - if not isinstance(key, basestring): + if not isinstance(key, six.string_types): raise TypeError if key not in self._columns.keys(): raise KeyError @@ -459,7 +459,7 @@ def __getitem__(self, key): def __setitem__(self, key, val): """ Sets a column's value. """ - if not isinstance(key, basestring): + if not isinstance(key, six.string_types): raise TypeError if key not in self._columns.keys(): raise KeyError diff --git a/cqlengine/query.py b/cqlengine/query.py index cb1d9241a1..e86a43160c 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -577,7 +577,7 @@ def delete(self): Deletes the contents of a query """ #validate where clause - partition_key = self.model._primary_keys.values()[0] + partition_key = [x for x in self.model._primary_keys.values()][0] if not any([c.field == partition_key.column_name for c in self._where]): raise QueryException("The partition key must be defined on delete queries") From a93a41ee1bc257759c6b5427685d4374da856f2a Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 12:11:35 -0700 Subject: [PATCH 0882/3726] removing longs and fixing iterators --- cqlengine/functions.py | 4 ++-- cqlengine/query.py | 2 +- cqlengine/tests/columns/test_container_columns.py | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/cqlengine/functions.py b/cqlengine/functions.py index 004c66e5c0..d146290722 100644 --- a/cqlengine/functions.py +++ b/cqlengine/functions.py @@ -64,7 +64,7 @@ def __init__(self, value): def to_database(self, val): epoch = datetime(1970, 1, 1, tzinfo=val.tzinfo) offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 - return long(((val - epoch).total_seconds() - offset) * 1000) + return int(((val - epoch).total_seconds() - offset) * 1000) def update_context(self, ctx): ctx[str(self.context_id)] = self.to_database(self.value) @@ -91,7 +91,7 @@ def __init__(self, value): def to_database(self, val): epoch = datetime(1970, 1, 1, tzinfo=val.tzinfo) offset = 
epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 - return long(((val - epoch).total_seconds() - offset) * 1000) + return int(((val - epoch).total_seconds() - offset) * 1000) def update_context(self, ctx): ctx[str(self.context_id)] = self.to_database(self.value) diff --git a/cqlengine/query.py b/cqlengine/query.py index e86a43160c..996526022d 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -368,7 +368,7 @@ def batch(self, batch_obj): def first(self): try: - return iter(self).next() + return six.next(iter(self)) except StopIteration: return None diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 0b756f3316..1b9200f0bd 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -1,6 +1,7 @@ from datetime import datetime, timedelta import json from uuid import uuid4 +import six from cqlengine import Model, ValidationError from cqlengine import columns @@ -22,7 +23,7 @@ class JsonTestColumn(columns.Column): def to_python(self, value): if value is None: return - if isinstance(value, basestring): + if isinstance(value, six.string_types): return json.loads(value) else: return value From d6ed6073f72bce0fd111343e6969c16b0b8d31c6 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 12:22:36 -0700 Subject: [PATCH 0883/3726] fixed iterators --- cqlengine/tests/query/test_named.py | 4 ++-- cqlengine/tests/query/test_queryset.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cqlengine/tests/query/test_named.py b/cqlengine/tests/query/test_named.py index 38df15420a..7e0ccc3d7b 100644 --- a/cqlengine/tests/query/test_named.py +++ b/cqlengine/tests/query/test_named.py @@ -184,8 +184,8 @@ def test_multiple_iterators_are_isolated(self): iter1 = iter(q) iter2 = iter(q) for attempt_id in expected_order: - assert iter1.next().attempt_id == attempt_id - assert iter2.next().attempt_id == 
attempt_id + assert next(iter1).attempt_id == attempt_id + assert next(iter2).attempt_id == attempt_id def test_get_success_case(self): """ diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 1c9a3ed5f9..6482cc7f87 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -315,8 +315,8 @@ def test_multiple_iterators_are_isolated(self): iter1 = iter(q) iter2 = iter(q) for attempt_id in expected_order: - assert iter1.next().attempt_id == attempt_id - assert iter2.next().attempt_id == attempt_id + assert next(iter1).attempt_id == attempt_id + assert next(iter2).attempt_id == attempt_id def test_get_success_case(self): """ From ce6c9a69e20b2678525a25541a3ff59b2e61f573 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 13:36:52 -0700 Subject: [PATCH 0884/3726] putting assertItemsEqual on the base test class for python > 3 --- cqlengine/tests/base.py | 6 ++++++ cqlengine/tests/model/test_class_construction.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index 62aee389c6..dea0cc191b 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -1,5 +1,7 @@ from unittest import TestCase import os +import sys +import six from cqlengine.connection import get_session CASSANDRA_VERSION = int(os.environ['CASSANDRA_VERSION']) @@ -22,3 +24,7 @@ def assertHasAttr(self, obj, attr): def assertNotHasAttr(self, obj, attr): self.assertFalse(hasattr(obj, attr), "{} shouldn't have the attribute: {}".format(obj, attr)) + + if sys.version_info > (3, 0): + def assertItemsEqual(self, first, second, msg=None): + return self.assertCountEqual(first, second, msg) diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index 37f0f81965..7ae758746b 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ 
b/cqlengine/tests/model/test_class_construction.py @@ -73,7 +73,7 @@ class Stuff(Model): content = columns.Text() numbers = columns.Integer() - self.assertEquals(Stuff._columns.keys(), ['id', 'words', 'content', 'numbers']) + self.assertEquals([x for x in Stuff._columns.keys()], ['id', 'words', 'content', 'numbers']) def test_exception_raised_when_creating_class_without_pk(self): with self.assertRaises(ModelDefinitionException): From c6b7c38635c8eb65ee77dd191d81b3ac987cf2f2 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 13:41:33 -0700 Subject: [PATCH 0885/3726] removal of maxint in python 3 --- cqlengine/tests/columns/test_validation.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 1eb2b232dc..80fcba3979 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -6,6 +6,7 @@ from unittest import TestCase from uuid import uuid4, uuid1 from cassandra import InvalidRequest +import six from cqlengine import ValidationError from cqlengine.connection import execute @@ -117,7 +118,7 @@ def tearDownClass(cls): sync_table(cls.VarIntTest) def test_varint_io(self): - long_int = sys.maxint + 1 + long_int = 92834902384092834092384028340283048239048203480234823048230482304820348239 int1 = self.VarIntTest.objects.create(test_id=0, bignum=long_int) int2 = self.VarIntTest.objects(test_id=0).first() assert int1.bignum == int2.bignum From bd9ad65c7e5721e22fa1d450825cee685f3af267 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 13:51:11 -0700 Subject: [PATCH 0886/3726] removing sys.maxint --- cqlengine/tests/columns/test_validation.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 80fcba3979..045b8d8176 100644 --- a/cqlengine/tests/columns/test_validation.py +++ 
b/cqlengine/tests/columns/test_validation.py @@ -118,10 +118,12 @@ def tearDownClass(cls): sync_table(cls.VarIntTest) def test_varint_io(self): + # TODO: this is a weird test. i changed the number from sys.maxint (which doesn't exist in python 3) + # to the giant number below and it broken between runs. long_int = 92834902384092834092384028340283048239048203480234823048230482304820348239 int1 = self.VarIntTest.objects.create(test_id=0, bignum=long_int) int2 = self.VarIntTest.objects(test_id=0).first() - assert int1.bignum == int2.bignum + self.assertEqual(int1.bignum, int2.bignum) class TestDate(BaseCassEngTestCase): From 70bab4ffeb233fe572cb2e0031c4f99626b09b34 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 14:20:23 -0700 Subject: [PATCH 0887/3726] listify the range --- cqlengine/tests/columns/test_container_columns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 1b9200f0bd..03cbd7452f 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -226,7 +226,7 @@ def test_element_count_validation(self): def test_partial_updates(self): """ Tests that partial udpates work as expected """ - final = range(10) + final = list(range(10)) initial = final[3:7] m1 = TestListModel.create(int_list=initial) From fd71963ccc13640b58aef561cd98cd172d5a110a Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 17:05:54 -0700 Subject: [PATCH 0888/3726] fixing up the bytes column, now blob --- cqlengine/columns.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 7a934830c9..54902337f7 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -213,23 +213,22 @@ def _val_is_null(self, val): return val is None -class Bytes(Column): +class Blob(Column): db_type = 'blob' - class 
Quoter(ValueQuoter): - def __str__(self): - return '0x' + self.value.encode('hex') - def to_database(self, value): - val = super(Bytes, self).to_database(value) - if val is None: return - return self.Quoter(val) + if not isinstance(value, six.binary_type): + raise Exception("expecting a binary, got a %s" % type(value)) + + val = super(Bytes, self).to_database(value) + return six.b(val) def to_python(self, value): #return value[2:].decode('hex') - return value + return six.u(value) +Bytes = Blob class Ascii(Column): db_type = 'ascii' From 770c1009d1cf80004b5aedfe4866ca1a24659aa4 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 18:08:13 -0700 Subject: [PATCH 0889/3726] bytes column explicitly only accepts bytes now --- cqlengine/columns.py | 4 ++-- cqlengine/tests/columns/test_value_io.py | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 54902337f7..e3413a08e0 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -222,11 +222,11 @@ def to_database(self, value): raise Exception("expecting a binary, got a %s" % type(value)) val = super(Bytes, self).to_database(value) - return six.b(val) + return bytearray(val) def to_python(self, value): #return value[2:].decode('hex') - return six.u(value) + return value Bytes = Blob diff --git a/cqlengine/tests/columns/test_value_io.py b/cqlengine/tests/columns/test_value_io.py index 1515c25a26..45bc59455f 100644 --- a/cqlengine/tests/columns/test_value_io.py +++ b/cqlengine/tests/columns/test_value_io.py @@ -1,6 +1,7 @@ from datetime import datetime, timedelta from decimal import Decimal from uuid import uuid1, uuid4, UUID +import six from cqlengine.tests.base import BaseCassEngTestCase @@ -80,9 +81,9 @@ def test_column_io(self): class TestBlobIO(BaseColumnIOTest): - column = columns.Bytes - pkey_val = 'blake', uuid4().bytes - data_val = 'eggleston', uuid4().bytes + column = columns.Blob + pkey_val = six.b('blake'), uuid4().bytes + 
data_val = six.b('eggleston'), uuid4().bytes class TestTextIO(BaseColumnIOTest): From 899c2907799b95b1d0ccf2fa85e79f13f701c7b0 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 18:22:05 -0700 Subject: [PATCH 0890/3726] fixed test which wasn't providing a specific encoding on a bytearray --- cqlengine/tests/columns/test_validation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index 045b8d8176..dc14eb6150 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -294,7 +294,7 @@ def test_max_length(self): def test_type_checking(self): Text().validate('string') Text().validate(u'unicode') - Text().validate(bytearray('bytearray')) + Text().validate(bytearray('bytearray', encoding='ascii')) with self.assertRaises(ValidationError): Text(required=True).validate(None) From 6fc8b7cfd53790df6b90625f6f1c0c92a4081ca9 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 18:24:41 -0700 Subject: [PATCH 0891/3726] fixed python 3 print --- cqlengine/tests/test_load.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/tests/test_load.py b/cqlengine/tests/test_load.py index 6e64d96d87..0db4243bf6 100644 --- a/cqlengine/tests/test_load.py +++ b/cqlengine/tests/test_load.py @@ -20,12 +20,12 @@ def test_lots_of_queries(): gc.collect() objgraph.show_most_common_types() - print "Starting..." 
+ print("Starting...") for i in range(1000000): if i % 25000 == 0: # print memory statistic - print "Memory usage: %s" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) + print("Memory usage: %s" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)) LoadTest.create(k=i, v=i) From d4518f4d1f7498403399c6d36cd2f61e4464b452 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 18:47:12 -0700 Subject: [PATCH 0892/3726] documentation around development --- docs/topics/development.rst | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 docs/topics/development.rst diff --git a/docs/topics/development.rst b/docs/topics/development.rst new file mode 100644 index 0000000000..a7390e3438 --- /dev/null +++ b/docs/topics/development.rst @@ -0,0 +1,34 @@ +================== +Development +================== + +Tests are run using Travis CI using a Matrix to test different Cassandra and Python versions. Only Pull Requests that have passed the entire matrix will be considered for merge into the main codebase. + +Python versions: + +- 2.7 +- 3.4 + +Cassandra vesions: + +- 1.2 (protocol_version 1) +- 2.0 (protocol_version 2) +- 2.1 (upcoming, protocol_version 3) + + +Testing Locally +================= + +Before testing, you'll need to set an environment variable to the version of Cassandra that's being tested. The version cooresponds to the release, so for example if you're testing against Cassandra 2.1, you'd set the following: + + export CASSANDRA_VERSION=20 + +At the command line, execute: + + bin/test.py + +This is a wrapper for nose that also sets up the database connection. 
+ + + + From e116f134152b61f52debbd4ca1ef8272e945ae35 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 18:49:36 -0700 Subject: [PATCH 0893/3726] updated documentation index --- docs/index.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/index.rst b/docs/index.rst index deda30ef0e..77ee31e926 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -30,6 +30,7 @@ Contents: topics/connection topics/manage_schemas topics/faq + topics/development .. _getting-started: From afec245a8ec752bd837a1e309dde30ef75ed0557 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 18:49:58 -0700 Subject: [PATCH 0894/3726] moved dev up in index --- docs/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.rst b/docs/index.rst index 77ee31e926..a2f0cae417 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -29,8 +29,8 @@ Contents: topics/columns topics/connection topics/manage_schemas - topics/faq topics/development + topics/faq .. _getting-started: From 2d442691a7b9dded25a61f951639c090f5b48a31 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 18:51:13 -0700 Subject: [PATCH 0895/3726] fixing documentation errors --- docs/index.rst | 2 +- docs/topics/development.rst | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index a2f0cae417..cbd71dbf41 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -98,7 +98,7 @@ Getting Started example5 -`Report a Bug `_ +`Report a Bug `_ `Users Mailing List `_ diff --git a/docs/topics/development.rst b/docs/topics/development.rst index a7390e3438..3f7e2d32cf 100644 --- a/docs/topics/development.rst +++ b/docs/topics/development.rst @@ -21,11 +21,15 @@ Testing Locally Before testing, you'll need to set an environment variable to the version of Cassandra that's being tested. 
The version cooresponds to the release, so for example if you're testing against Cassandra 2.1, you'd set the following: - export CASSANDRA_VERSION=20 + .. code-block::bash + + export CASSANDRA_VERSION=20 At the command line, execute: - bin/test.py + .. code-block::bash + + bin/test.py This is a wrapper for nose that also sets up the database connection. From 05e681b5328c0acde7381dc402eca304c9008f09 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 18:52:47 -0700 Subject: [PATCH 0896/3726] pull requests, contributing guidelines --- docs/topics/development.rst | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/docs/topics/development.rst b/docs/topics/development.rst index 3f7e2d32cf..d785ab05d3 100644 --- a/docs/topics/development.rst +++ b/docs/topics/development.rst @@ -2,7 +2,10 @@ Development ================== -Tests are run using Travis CI using a Matrix to test different Cassandra and Python versions. Only Pull Requests that have passed the entire matrix will be considered for merge into the main codebase. +Travis CI +================ + +Tests are run using Travis CI using a Matrix to test different Cassandra and Python versions. Python versions: @@ -15,6 +18,12 @@ Cassandra vesions: - 2.0 (protocol_version 2) - 2.1 (upcoming, protocol_version 3) +Pull Requests +=============== +Only Pull Requests that have passed the entire matrix will be considered for merge into the main codebase. + +Please see the . 
+ Testing Locally ================= From 4cf88cf6e4819b2042f06edaa40b0d75bf718876 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 18:53:28 -0700 Subject: [PATCH 0897/3726] formatting fix --- docs/topics/development.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/topics/development.rst b/docs/topics/development.rst index d785ab05d3..83236ebf2e 100644 --- a/docs/topics/development.rst +++ b/docs/topics/development.rst @@ -22,7 +22,7 @@ Pull Requests =============== Only Pull Requests that have passed the entire matrix will be considered for merge into the main codebase. -Please see the . +Please see the contributing guidelines: https://github.com/cqlengine/cqlengine/blob/master/CONTRIBUTING.md Testing Locally From 20ece79adfc833d38a7144b011e4fca6ce74c0cc Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 18:59:59 -0700 Subject: [PATCH 0898/3726] updated development docs with travis location --- docs/topics/development.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/topics/development.rst b/docs/topics/development.rst index 83236ebf2e..cc5c6de73f 100644 --- a/docs/topics/development.rst +++ b/docs/topics/development.rst @@ -5,7 +5,7 @@ Development Travis CI ================ -Tests are run using Travis CI using a Matrix to test different Cassandra and Python versions. +Tests are run using Travis CI using a Matrix to test different Cassandra and Python versions. 
It is located here: https://travis-ci.org/cqlengine/cqlengine Python versions: From 689f1ed23c826d4d7cfa47fb604280b5bf816bce Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 19:05:02 -0700 Subject: [PATCH 0899/3726] starting on external resources --- docs/topics/external_resources.rst | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 docs/topics/external_resources.rst diff --git a/docs/topics/external_resources.rst b/docs/topics/external_resources.rst new file mode 100644 index 0000000000..896ff29115 --- /dev/null +++ b/docs/topics/external_resources.rst @@ -0,0 +1,20 @@ +============================ +External Resources +============================ + +Video Tutorials +================ + +Introduction to CQLEngine: https://www.youtube.com/watch?v=zrbQcPNMbB0 + +TimeUUID and Table Polymorphism: https://www.youtube.com/watch?v=clXN9pnakvI + + + + + + + + + + From fc2d4ea8cbda9c59b61325b867ca2925ce928750 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 19:06:13 -0700 Subject: [PATCH 0900/3726] table polymorphism blog posts --- docs/topics/external_resources.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/topics/external_resources.rst b/docs/topics/external_resources.rst index 896ff29115..811d1975a6 100644 --- a/docs/topics/external_resources.rst +++ b/docs/topics/external_resources.rst @@ -10,8 +10,10 @@ Introduction to CQLEngine: https://www.youtube.com/watch?v=zrbQcPNMbB0 TimeUUID and Table Polymorphism: https://www.youtube.com/watch?v=clXN9pnakvI +Blog Posts +=========== - +Table Polymorphism: http://blakeeggleston.com/cqlengine-08-released.html From 0096d3764cca9143eceb7830c0dd7ba6d6de84ed Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 19:13:54 -0700 Subject: [PATCH 0901/3726] external resources added to index --- docs/index.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/index.rst b/docs/index.rst index cbd71dbf41..d927b25ab4 100644 --- 
a/docs/index.rst +++ b/docs/index.rst @@ -29,6 +29,7 @@ Contents: topics/columns topics/connection topics/manage_schemas + topics/external_resources topics/development topics/faq From a270d08e88d1c622443843423e893a5d288714b4 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 19:35:25 -0700 Subject: [PATCH 0902/3726] attribution to blake for blog post --- docs/topics/external_resources.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/topics/external_resources.rst b/docs/topics/external_resources.rst index 811d1975a6..58da23d9c3 100644 --- a/docs/topics/external_resources.rst +++ b/docs/topics/external_resources.rst @@ -13,7 +13,7 @@ TimeUUID and Table Polymorphism: https://www.youtube.com/watch?v=clXN9pnakvI Blog Posts =========== -Table Polymorphism: http://blakeeggleston.com/cqlengine-08-released.html +Blake Eggleston on Table Polymorphism in the .8 release: http://blakeeggleston.com/cqlengine-08-released.html From bde67148eb109af63a3f6242d15909d6d5331329 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 14 Aug 2014 19:42:53 -0700 Subject: [PATCH 0903/3726] related projects page --- docs/index.rst | 1 + docs/topics/related_projects.rst | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+) create mode 100644 docs/topics/related_projects.rst diff --git a/docs/index.rst b/docs/index.rst index d927b25ab4..c02138cd34 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -30,6 +30,7 @@ Contents: topics/connection topics/manage_schemas topics/external_resources + topics/related_projects topics/development topics/faq diff --git a/docs/topics/related_projects.rst b/docs/topics/related_projects.rst new file mode 100644 index 0000000000..eff06b3442 --- /dev/null +++ b/docs/topics/related_projects.rst @@ -0,0 +1,22 @@ +================================== +Related Projects +================================== + +Cassandra Native Driver +========================= + +- Docs: http://datastax.github.io/python-driver/api/index.html +- 
Github: https://github.com/datastax/python-driver +- Pypi: https://pypi.python.org/pypi/cassandra-driver/2.1.0 + + +Sphinx Contrib Module +========================= + +- Github https://github.com/dokai/sphinxcontrib-cqlengine + + +Django Integration +========================= + +- Github https://cqlengine.readthedocs.org From 28d3b0b56cd08c573415ad5db1bbe0066422f7bb Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Fri, 15 Aug 2014 11:48:31 +0800 Subject: [PATCH 0904/3726] if not exist only supported by cassandra 2.0 or later. --- cqlengine/tests/test_ifnotexists.py | 39 ++++++++++++++++------------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/cqlengine/tests/test_ifnotexists.py b/cqlengine/tests/test_ifnotexists.py index ca23c49c60..94af055e42 100644 --- a/cqlengine/tests/test_ifnotexists.py +++ b/cqlengine/tests/test_ifnotexists.py @@ -1,5 +1,5 @@ from cqlengine.management import sync_table, drop_table, create_keyspace, delete_keyspace -from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.tests.base import BaseCassEngTestCase, CASSANDRA_VERSION from cqlengine.models import Model from cqlengine import columns from uuid import uuid4 @@ -41,31 +41,35 @@ class IfNotExistsInsertTests(BaseIfNotExistsTest): def test_insert_if_not_exists_success(self): """ tests that insertion with if_not_exists work as expected """ - id = uuid4() - TestIfNotExistsModel.create(id=id, count=8, text='123456789') - TestIfNotExistsModel.if_not_exists(True).create(id=id, count=9, text='111111111111') + if CASSANDRA_VERSION >= 20: + id = uuid4() - q = TestIfNotExistsModel.objects(id=id) - self.assertEqual(len(q), 1) + TestIfNotExistsModel.create(id=id, count=8, text='123456789') + TestIfNotExistsModel.if_not_exists(True).create(id=id, count=9, text='111111111111') - tm = q.first() - self.assertEquals(tm.count, 8) - self.assertEquals(tm.text, '123456789') + q = TestIfNotExistsModel.objects(id=id) + self.assertEqual(len(q), 1) + + tm = q.first() + 
self.assertEquals(tm.count, 8) + self.assertEquals(tm.text, '123456789') def test_insert_if_not_exists_failure(self): """ tests that insertion with if_not_exists failure """ - id = uuid4() - TestIfNotExistsModel.create(id=id, count=8, text='123456789') - TestIfNotExistsModel.if_not_exists(False).create(id=id, count=9, text='111111111111') + if CASSANDRA_VERSION >= 20: + id = uuid4() + + TestIfNotExistsModel.create(id=id, count=8, text='123456789') + TestIfNotExistsModel.if_not_exists(False).create(id=id, count=9, text='111111111111') - q = TestIfNotExistsModel.objects(id=id) - self.assertEquals(len(q), 1) + q = TestIfNotExistsModel.objects(id=id) + self.assertEquals(len(q), 1) - tm = q.first() - self.assertEquals(tm.count, 9) - self.assertEquals(tm.text, '111111111111') + tm = q.first() + self.assertEquals(tm.count, 9) + self.assertEquals(tm.text, '111111111111') class IfNotExistsModelTest(BaseIfNotExistsTest): @@ -82,6 +86,7 @@ def test_if_not_exists_included_on_create(self): self.assertIn("IF NOT EXISTS", query) def test_if_not_exists_included_on_save(self): + """ tests if we correctly put 'IF NOT EXISTS' for insert statement """ session = get_session() From 97e6426468fe065d21b1ae04ab86daca7baa58b3 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Fri, 15 Aug 2014 11:59:44 +0800 Subject: [PATCH 0905/3726] if not exist is a new feature of cassandra 2.0 --- docs/topics/models.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 1a667a8b3c..be324ef1b0 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -191,6 +191,8 @@ Model Methods object is determined by its primary key(s). And please note using this flag would incur performance cost. + This method supported on Cassandra 2.0 or later. + .. method:: update(**values) Performs an update on the model instance. 
You can pass in values to set on the model From 23edc1410208d4b508308b7e72af507ce9840332 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 15 Aug 2014 08:24:23 -0700 Subject: [PATCH 0906/3726] docs around unlogged batches --- docs/topics/queryset.rst | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index e1e172645a..ba02f1bb34 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -285,6 +285,8 @@ Batch Queries cqlengine now supports batch queries using the BatchQuery class. Batch queries can be started and stopped manually, or within a context manager. To add queries to the batch object, you just need to precede the create/save/delete call with a call to batch, and pass in the batch object. + + Batch Query General Use Pattern ------------------------------- @@ -375,6 +377,19 @@ Batch Query Execution Callbacks Failure in any of the callbacks does not affect the batch's execution, as the callbacks are started after the execution of the batch is complete. +Logged vs Unlogged +==================== + + By default, queries in cqlengine are LOGGED, which carries additional overhead from UNLOGGED. To explicitly state which batch type to use, simply: + + + .. 
code-block:: python + + from cqlengine.query import BatchType + with BatchQuery(batch_type=BatchType.Unlogged) as b: + LogEntry.batch(b).create(k=1, v=1) + LogEntry.batch(b).create(k=1, v=2) + QuerySet method reference From 499d1aa3121a8bd724a4f07d4104aebc3ca4dc3f Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 15 Aug 2014 08:40:23 -0700 Subject: [PATCH 0907/3726] improved documentation around blind updates --- docs/topics/queryset.rst | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index ba02f1bb34..d891c27d68 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -377,9 +377,8 @@ Batch Query Execution Callbacks Failure in any of the callbacks does not affect the batch's execution, as the callbacks are started after the execution of the batch is complete. -Logged vs Unlogged -==================== - +Logged vs Unlogged Batches +--------------------------- By default, queries in cqlengine are LOGGED, which carries additional overhead from UNLOGGED. To explicitly state which batch type to use, simply: @@ -460,11 +459,31 @@ QuerySet method reference update like so: .. code-block:: python + Model.objects(key=n).update(value='x') Passing in updates for columns which are not part of the model will raise a ValidationError. Per column validation will be performed, but instance level validation will not - (`Model.validate` is not called). + (`Model.validate` is not called). This is sometimes referred to as a blind update. + + For example: + + .. 
code-block:: python + + class User(Model): + id = Integer(primary_key=True) + name = Text() + + setup(["localhost"], "test") + sync_table(User) + + u = User.create(id=1, name="jon") + + User.objects(id=1).update(name="Steve") + + # sets name to null + User.objects(id=1).update(name=None) + The queryset update method also supports blindly adding and removing elements from container columns, without loading a model instance from Cassandra. From 6b68c32b972eb263a2ddc726db9191fee76660ff Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 15 Aug 2014 09:20:51 -0700 Subject: [PATCH 0908/3726] linking to blind updates from instance update --- docs/topics/models.rst | 2 ++ docs/topics/queryset.rst | 2 ++ 2 files changed, 4 insertions(+) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 7beda0e40f..d21c6aa8fa 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -192,6 +192,8 @@ Model Methods fields. If no fields on the model have been modified since loading, no query will be performed. Model validation is performed normally. + It is possible to do a blind update, that is, to update a field without having first selected the object out of the database. See :ref:`Blind Updates ` + .. method:: get_changed_columns() Returns a list of column names that have changed since the model was instantiated or saved diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index d891c27d68..3a523e7e2d 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -453,6 +453,8 @@ QuerySet method reference Sets the ttl to run the query query with. Note that running a select query with a ttl value will raise an exception + .. _blind_updates: + .. method:: update(**values) Performs an update on the row selected by the queryset. 
Include values to update in the From 79b57bc2a8e45347f261aac806eb117027de653b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 15 Aug 2014 09:43:40 -0700 Subject: [PATCH 0909/3726] improved queryset docs --- docs/topics/queryset.rst | 49 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index 3a523e7e2d..d03a54a426 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -400,6 +400,12 @@ QuerySet method reference Returns a queryset matching all rows + .. code-block:: python + + for user in User.objects().all(): + print(user) + + .. method:: batch(batch_object) Sets the batch object to run the query on. Note that running a select query with a batch object will raise an exception @@ -408,11 +414,20 @@ QuerySet method reference Sets the consistency level for the operation. Options may be imported from the top level :attr:`cqlengine` package. + .. code-block:: python + + for user in User.objects(id=3).consistency(ONE): + print(user) + .. method:: count() Returns the number of matching rows in your QuerySet + .. code-block:: python + + print(User.objects().count()) + .. method:: filter(\*\*values) :param values: See :ref:`retrieving-objects-with-filters` @@ -425,12 +440,22 @@ QuerySet method reference Returns a single object matching the QuerySet. If no objects are matched, a :attr:`~models.Model.DoesNotExist` exception is raised. If more than one object is found, a :attr:`~models.Model.MultipleObjectsReturned` exception is raised. + .. code-block:: python + + user = User.get(id=1) + + .. method:: limit(num) Limits the number of results returned by Cassandra. *Note that CQL's default limit is 10,000, so all queries without a limit set explicitly will have an implicit limit of 10,000* + .. code-block:: python + + for user in User.objects().limit(100): + print(user) + .. method:: order_by(field_name) :param field_name: the name of the field to order on. 
*Note: the field_name must be a clustering key* @@ -438,6 +463,30 @@ QuerySet method reference Sets the field to order on. + .. code-block:: python + + from uuid import uuid1,uuid4 + + class Comment(Model): + photo_id = UUID(primary_key=True) + comment_id = TimeUUID(primary_key=True, default=uuid1) # auto becomes clustering key + comment = Text() + + sync_table(Comment) + + u = uuid4() + for x in range(5): + Comment.create(photo_id=u, comment="test %d" % x) + + print("Normal") + for comment in Comment.objects(photo_id=u): + print comment.comment_id + + print("Reversed") + for comment in Comment.objects(photo_id=u).order_by("-comment_id"): + print comment.comment_id + + .. method:: allow_filtering() Enables the (usually) unwise practive of querying on a clustering key without also defining a partition key From 051ab7bf4a4a01161108d5e8327a75ce558e8fbc Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 15 Aug 2014 11:59:39 -0700 Subject: [PATCH 0910/3726] added static column support --- cqlengine/columns.py | 7 +++-- cqlengine/models.py | 1 - cqlengine/tests/management/test_management.py | 30 ++++++++++++++++++- cqlengine/tests/model/test_model.py | 4 ++- docs/topics/queryset.rst | 2 +- 5 files changed, 38 insertions(+), 6 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index e3413a08e0..af8950230b 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -99,7 +99,8 @@ def __init__(self, default=None, required=False, clustering_order=None, - polymorphic_key=False): + polymorphic_key=False, + static=False): """ :param primary_key: bool flag, indicates this column is a primary key. 
The first primary key defined on a model is the partition key (unless partition keys are set), all others are cluster keys @@ -125,6 +126,7 @@ def __init__(self, self.polymorphic_key = polymorphic_key #the column name in the model definition self.column_name = None + self.static = static self.value = None @@ -182,7 +184,8 @@ def get_column_def(self): """ Returns a column definition for CQL table definition """ - return '{} {}'.format(self.cql, self.db_type) + static = "static" if self.static else "" + return '{} {} {}'.format(self.cql, self.db_type, static) def set_column_name(self, name): """ diff --git a/cqlengine/models.py b/cqlengine/models.py index cbb1485dea..d725e42c9f 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -637,7 +637,6 @@ def _transform_column(col_name, col_obj): is_polymorphic_base = any([c[1].polymorphic_key for c in column_definitions]) column_definitions = [x for x in inherited_columns.items()] + column_definitions - polymorphic_columns = [c for c in column_definitions if c[1].polymorphic_key] is_polymorphic = len(polymorphic_columns) > 0 if len(polymorphic_columns) > 1: diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 4c586eac4d..c2fb19afc2 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -1,5 +1,6 @@ - +import mock from cqlengine import ALL, CACHING_ALL, CACHING_NONE +from cqlengine.connection import get_session from cqlengine.exceptions import CQLEngineException from cqlengine.management import get_fields, sync_table, drop_table from cqlengine.tests.base import BaseCassEngTestCase, CASSANDRA_VERSION @@ -244,3 +245,30 @@ class FakeModel(object): def test_failure(self): with self.assertRaises(CQLEngineException): sync_table(self.FakeModel) + + +def test_static_columns(): + class StaticModel(Model): + id = columns.Integer(primary_key=True) + c = columns.Integer(primary_key=True) + name = 
columns.Text(static=True) + + drop_table(StaticModel) + + from mock import patch + + from cqlengine.connection import get_session + session = get_session() + + with patch.object(session, "execute", side_effect=Exception) as m: + try: + sync_table(StaticModel) + except: + pass + + assert m.call_count > 0 + statement = m.call_args[0][0].query_string + assert '"name" text static' in statement, statement + + + diff --git a/cqlengine/tests/model/test_model.py b/cqlengine/tests/model/test_model.py index 1bd143d01c..9d4e0199ae 100644 --- a/cqlengine/tests/model/test_model.py +++ b/cqlengine/tests/model/test_model.py @@ -53,4 +53,6 @@ def test_model_with_method_name_conflict(self): class IllegalFilterColumnModel(Model): __keyspace__ = 'test' my_primary_key = columns.Integer(primary_key=True) - filter = columns.Text() \ No newline at end of file + filter = columns.Text() + + diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index d03a54a426..2466decee6 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -464,7 +464,7 @@ QuerySet method reference Sets the field to order on. .. code-block:: python - + from uuid import uuid1,uuid4 class Comment(Model): From caddded43a2a0ee6566a6c8308c046a326faf19b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 15 Aug 2014 12:02:19 -0700 Subject: [PATCH 0911/3726] docs for shared columns --- docs/topics/models.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index d21c6aa8fa..f2e328d2ef 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -133,6 +133,9 @@ Column Options :attr:`~cqlengine.columns.BaseColumn.required` If True, this model cannot be saved without a value defined for this column. Defaults to False. Primary key fields always require values. + :attr:`~cqlengine.columns.BaseColumn.static` + Defined a column as static. Static columns are shared by all rows in a partition. 
+ Model Methods ============= Below are the methods that can be called on model instances. From 3f6c5993a259a004d1c74e8cb263e4b0ba293945 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 15 Aug 2014 12:13:18 -0700 Subject: [PATCH 0912/3726] master travis ci badge --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index e4fef0a806..4398c8e894 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,8 @@ cqlengine =============== +.. image:: https://travis-ci.org/cqlengine/cqlengine.svg?branch=master + cqlengine is a Cassandra CQL 3 Object Mapper for Python **Users of versions < 0.16, the default keyspace 'cqlengine' has been removed. Please read this before upgrading:** [Breaking Changes](https://cqlengine.readthedocs.org/en/latest/topics/models.html#keyspace-change) From dc09912b1c90abc6dfe454fb68303a37fc09efdc Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 15 Aug 2014 12:14:57 -0700 Subject: [PATCH 0913/3726] fixed travis image --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 4398c8e894..bddac61440 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ cqlengine =============== -.. 
image:: https://travis-ci.org/cqlengine/cqlengine.svg?branch=master +![cqlengine build status](https://travis-ci.org/cqlengine/cqlengine.svg?branch=master) cqlengine is a Cassandra CQL 3 Object Mapper for Python From 30c4a4c91e7965ab2d8e360abd987971eb06b03e Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 15 Aug 2014 13:47:35 -0700 Subject: [PATCH 0914/3726] updating readme, changelog --- README.md | 6 ++++++ changelog | 6 +++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index bddac61440..3bb53ae73b 100644 --- a/README.md +++ b/README.md @@ -82,3 +82,9 @@ example5 ## Contributing If you'd like to contribute to cqlengine, please read the [contributor guidelines](https://github.com/bdeggleston/cqlengine/blob/master/CONTRIBUTING.md) + + +## Authors + +cqlengine was developed primarily by (Blake Eggleston)[blakeeggleston](https://twitter.com/blakeeggleston) and [Jon Haddad](https://twitter.com/rustyrazorblade), with contributions from several others in the community. 
+ diff --git a/changelog b/changelog index e044e0b7fc..39bfe14c2c 100644 --- a/changelog +++ b/changelog @@ -1,11 +1,15 @@ CHANGELOG +0.18.0 + + + 0.17.0 * retry_connect on setup() * optional default TTL on model -* fixed caching documentation +* fixed caching documentation 0.16.0 From 59077a0289c609ef8a9173248785e1a0db48d2c4 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 15 Aug 2014 14:55:47 -0700 Subject: [PATCH 0915/3726] script to generate changelog information, changelog update --- changelog | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/changelog b/changelog index 39bfe14c2c..0420badfb4 100644 --- a/changelog +++ b/changelog @@ -2,7 +2,20 @@ CHANGELOG 0.18.0 - +[260] support for static columns +[259] docs update - examples for the queryset method references +[258] improve docs around blind updates +[256] write docs about developing cqlengine itself +[254] use CASSANDRA_VERSION in tests +[252] memtable_flush_period_in_ms not in C 1.2 - guard against test failure +[251] test fails occasionally despite lists looking the same +[246] document Batches default behaviour +[239] add sphinx contrib module to docs +[237] update to cassandra-driver 2.1 +[232] update docs w/ links to external tutorials +[229] get travis to test different cassandra versions +[211] inet datatype support +[209] Python 3 support 0.17.0 From 0824fa45ea065f9a4c4294b5756804d868619df4 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 15 Aug 2014 14:55:58 -0700 Subject: [PATCH 0916/3726] script to update changelog --- bin/get_changelog.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 bin/get_changelog.py diff --git a/bin/get_changelog.py b/bin/get_changelog.py new file mode 100644 index 0000000000..7844f66c0b --- /dev/null +++ b/bin/get_changelog.py @@ -0,0 +1,18 @@ +from github import Github +import sys, os + +g = Github(os.environ["GITHUB_TOKEN"]) + +milestone = sys.argv[1] +print "Fetching all issues for milestone %s" % 
milestone +repo = g.get_repo("cqlengine/cqlengine") + +milestones = repo.get_milestones() +milestone = [x for x in milestones if x.title == milestone][0] +issues = repo.get_issues(milestone=milestone, state="closed") + +for issue in issues: + print "[%d] %s" % (issue.number, issue.title) + +print("Done") + From 7c296474aa9abfc110e55225d0f993ec61506858 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 15 Aug 2014 15:38:56 -0700 Subject: [PATCH 0917/3726] version bump --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index c5523bd09b..66333910a4 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.17.0 +0.18.0 From fa2aabaec9c1aa7c9634ca074c522dc5223610c3 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 15 Aug 2014 16:10:33 -0700 Subject: [PATCH 0918/3726] improvements to manifest to exclude pyc and __pycache__ --- MANIFEST.in | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/MANIFEST.in b/MANIFEST.in index d7e1cb6199..ddd7d6490e 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -2,4 +2,11 @@ include AUTHORS include LICENSE include README.md recursive-include cqlengine * +global-exclude *.pyc +global-exclude __pycache__/* +global-exclude *.so +global-exclude .DS_Store + + + From d573b8201a76f94a0e7ebc9703352cd17f2f6065 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 15 Aug 2014 19:51:54 -0700 Subject: [PATCH 0919/3726] added pypi version --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 3bb53ae73b..8af77e7520 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,7 @@ cqlengine =============== ![cqlengine build status](https://travis-ci.org/cqlengine/cqlengine.svg?branch=master) +![cqlengine pypi version](http://img.shields.io/pypi/v/cqlengine.svg) cqlengine is a Cassandra CQL 3 Object Mapper for Python From 845ac396312d153e30340de644f5139ccb69bc08 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 15 Aug 2014 19:52:46 
-0700 Subject: [PATCH 0920/3726] monthly downloads --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 8af77e7520..0f1b785e41 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ cqlengine ![cqlengine build status](https://travis-ci.org/cqlengine/cqlengine.svg?branch=master) ![cqlengine pypi version](http://img.shields.io/pypi/v/cqlengine.svg) +![cqlengine monthly downloads](http://img.shields.io/pypi/dm/cqlengine.svg) cqlengine is a Cassandra CQL 3 Object Mapper for Python From 0803424b0bc6fefe27c737914162f331c63bb8a6 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Sat, 16 Aug 2014 11:28:16 +0800 Subject: [PATCH 0921/3726] removing parameter of if_not_exists in QuerySet --- cqlengine/query.py | 4 ++-- cqlengine/tests/test_ifnotexists.py | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index 52e637a8e6..8751f9c934 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -709,9 +709,9 @@ def timestamp(self, timestamp): clone._timestamp = timestamp return clone - def if_not_exists(self, if_not_exists): + def if_not_exists(self): clone = copy.deepcopy(self) - clone._if_not_exists = if_not_exists + clone._if_not_exists = True return clone def update(self, **values): diff --git a/cqlengine/tests/test_ifnotexists.py b/cqlengine/tests/test_ifnotexists.py index 94af055e42..264ba0a979 100644 --- a/cqlengine/tests/test_ifnotexists.py +++ b/cqlengine/tests/test_ifnotexists.py @@ -46,7 +46,7 @@ def test_insert_if_not_exists_success(self): id = uuid4() TestIfNotExistsModel.create(id=id, count=8, text='123456789') - TestIfNotExistsModel.if_not_exists(True).create(id=id, count=9, text='111111111111') + TestIfNotExistsModel.if_not_exists().create(id=id, count=9, text='111111111111') q = TestIfNotExistsModel.objects(id=id) self.assertEqual(len(q), 1) @@ -62,7 +62,7 @@ def test_insert_if_not_exists_failure(self): id = uuid4() TestIfNotExistsModel.create(id=id, count=8, 
text='123456789') - TestIfNotExistsModel.if_not_exists(False).create(id=id, count=9, text='111111111111') + TestIfNotExistsModel.create(id=id, count=9, text='111111111111') q = TestIfNotExistsModel.objects(id=id) self.assertEquals(len(q), 1) @@ -80,7 +80,7 @@ def test_if_not_exists_included_on_create(self): session = get_session() with mock.patch.object(session, 'execute') as m: - TestIfNotExistsModel.if_not_exists(True).create(count=8) + TestIfNotExistsModel.if_not_exists().create(count=8) query = m.call_args[0][0].query_string self.assertIn("IF NOT EXISTS", query) @@ -99,7 +99,7 @@ def test_if_not_exists_included_on_save(self): def test_queryset_is_returned_on_class(self): """ ensure we get a queryset description back """ - qs = TestIfNotExistsModel.if_not_exists(True) + qs = TestIfNotExistsModel.if_not_exists() self.assertTrue(isinstance(qs, TestIfNotExistsModel.__queryset__), type(qs)) From 63fcae6a4ad451ccfbc592d3a228d5505178f2f5 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Sat, 16 Aug 2014 11:36:14 +0800 Subject: [PATCH 0922/3726] refine code with skipUnless --- cqlengine/tests/test_ifnotexists.py | 43 +++++++++++++++-------------- 1 file changed, 23 insertions(+), 20 deletions(-) diff --git a/cqlengine/tests/test_ifnotexists.py b/cqlengine/tests/test_ifnotexists.py index 264ba0a979..cd567cfe36 100644 --- a/cqlengine/tests/test_ifnotexists.py +++ b/cqlengine/tests/test_ifnotexists.py @@ -1,10 +1,13 @@ +from unittest import skipUnless from cqlengine.management import sync_table, drop_table, create_keyspace, delete_keyspace -from cqlengine.tests.base import BaseCassEngTestCase, CASSANDRA_VERSION +from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.models import Model from cqlengine import columns from uuid import uuid4 import mock -from cqlengine.connection import get_session +from cqlengine.connection import get_cluster, get_session + +cluster = get_cluster() class TestIfNotExistsModel(Model): @@ -39,37 +42,37 @@ def tearDownClass(cls): 
class IfNotExistsInsertTests(BaseIfNotExistsTest): + @skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") def test_insert_if_not_exists_success(self): """ tests that insertion with if_not_exists work as expected """ - if CASSANDRA_VERSION >= 20: - id = uuid4() + id = uuid4() - TestIfNotExistsModel.create(id=id, count=8, text='123456789') - TestIfNotExistsModel.if_not_exists().create(id=id, count=9, text='111111111111') + TestIfNotExistsModel.create(id=id, count=8, text='123456789') + TestIfNotExistsModel.if_not_exists().create(id=id, count=9, text='111111111111') - q = TestIfNotExistsModel.objects(id=id) - self.assertEqual(len(q), 1) + q = TestIfNotExistsModel.objects(id=id) + self.assertEqual(len(q), 1) - tm = q.first() - self.assertEquals(tm.count, 8) - self.assertEquals(tm.text, '123456789') + tm = q.first() + self.assertEquals(tm.count, 8) + self.assertEquals(tm.text, '123456789') + @skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") def test_insert_if_not_exists_failure(self): """ tests that insertion with if_not_exists failure """ - if CASSANDRA_VERSION >= 20: - id = uuid4() + id = uuid4() - TestIfNotExistsModel.create(id=id, count=8, text='123456789') - TestIfNotExistsModel.create(id=id, count=9, text='111111111111') + TestIfNotExistsModel.create(id=id, count=8, text='123456789') + TestIfNotExistsModel.create(id=id, count=9, text='111111111111') - q = TestIfNotExistsModel.objects(id=id) - self.assertEquals(len(q), 1) + q = TestIfNotExistsModel.objects(id=id) + self.assertEquals(len(q), 1) - tm = q.first() - self.assertEquals(tm.count, 9) - self.assertEquals(tm.text, '111111111111') + tm = q.first() + self.assertEquals(tm.count, 9) + self.assertEquals(tm.text, '111111111111') class IfNotExistsModelTest(BaseIfNotExistsTest): From fdd04857e5c2616990bd6c5388433a3b8db2875b Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Sat, 16 Aug 2014 22:06:53 +0800 Subject: [PATCH 0923/3726] add test case 
for batch query --- cqlengine/tests/test_ifnotexists.py | 61 +++++++++++++++++++++++------ 1 file changed, 49 insertions(+), 12 deletions(-) diff --git a/cqlengine/tests/test_ifnotexists.py b/cqlengine/tests/test_ifnotexists.py index cd567cfe36..b1dbe5f900 100644 --- a/cqlengine/tests/test_ifnotexists.py +++ b/cqlengine/tests/test_ifnotexists.py @@ -2,10 +2,11 @@ from cqlengine.management import sync_table, drop_table, create_keyspace, delete_keyspace from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.models import Model -from cqlengine import columns +from cqlengine import columns, BatchQuery from uuid import uuid4 +from datetime import datetime import mock -from cqlengine.connection import get_cluster, get_session +from cqlengine.connection import get_cluster cluster = get_cluster() @@ -58,7 +59,6 @@ def test_insert_if_not_exists_success(self): self.assertEquals(tm.count, 8) self.assertEquals(tm.text, '123456789') - @skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") def test_insert_if_not_exists_failure(self): """ tests that insertion with if_not_exists failure """ @@ -74,15 +74,48 @@ def test_insert_if_not_exists_failure(self): self.assertEquals(tm.count, 9) self.assertEquals(tm.text, '111111111111') + @skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") + def test_batch_insert_if_not_exists_success(self): + """ tests that batch insertion with if_not_exists work as expected """ + + id = uuid4() + + with BatchQuery() as b: + TestIfNotExistsModel.batch(b).if_not_exists().create(id=id, count=8, text='123456789') + + with BatchQuery() as b: + TestIfNotExistsModel.batch(b).if_not_exists().create(id=id, count=9, text='111111111111') + + q = TestIfNotExistsModel.objects(id=id) + self.assertEqual(len(q), 1) + + tm = q.first() + self.assertEquals(tm.count, 8) + self.assertEquals(tm.text, '123456789') + + def test_batch_insert_if_not_exists_failure(self): + """ tests that batch 
insertion with if_not_exists failure """ + id = uuid4() + + with BatchQuery() as b: + TestIfNotExistsModel.batch(b).create(id=id, count=8, text='123456789') + with BatchQuery() as b: + TestIfNotExistsModel.batch(b).create(id=id, count=9, text='111111111111') + + q = TestIfNotExistsModel.objects(id=id) + self.assertEquals(len(q), 1) + + tm = q.first() + self.assertEquals(tm.count, 9) + self.assertEquals(tm.text, '111111111111') + class IfNotExistsModelTest(BaseIfNotExistsTest): def test_if_not_exists_included_on_create(self): """ tests that if_not_exists on models works as expected """ - session = get_session() - - with mock.patch.object(session, 'execute') as m: + with mock.patch.object(self.session, 'execute') as m: TestIfNotExistsModel.if_not_exists().create(count=8) query = m.call_args[0][0].query_string @@ -91,9 +124,7 @@ def test_if_not_exists_included_on_create(self): def test_if_not_exists_included_on_save(self): """ tests if we correctly put 'IF NOT EXISTS' for insert statement """ - session = get_session() - - with mock.patch.object(session, 'execute') as m: + with mock.patch.object(self.session, 'execute') as m: tm = TestIfNotExistsModel(count=8) tm.if_not_exists(True).save() @@ -105,6 +136,14 @@ def test_queryset_is_returned_on_class(self): qs = TestIfNotExistsModel.if_not_exists() self.assertTrue(isinstance(qs, TestIfNotExistsModel.__queryset__), type(qs)) + def test_batch_if_not_exists(self): + """ ensure 'IF NOT EXISTS' exists in statement when in batch """ + with mock.patch.object(self.session, 'execute') as m: + with BatchQuery() as b: + TestIfNotExistsModel.batch(b).if_not_exists().create(count=8) + + self.assertIn("IF NOT EXISTS", m.call_args[0][0].query_string) + class IfNotExistsInstanceTest(BaseIfNotExistsTest): @@ -122,13 +161,11 @@ def test_if_not_exists_is_not_include_with_query_on_update(self): """ make sure we don't put 'IF NOT EXIST' in update statements """ - session = get_session() - o = TestIfNotExistsModel.create(text="whatever") 
o.text = "new stuff" o = o.if_not_exists(True) - with mock.patch.object(session, 'execute') as m: + with mock.patch.object(self.session, 'execute') as m: o.save() query = m.call_args[0][0].query_string From 463c5c8007c6ef78c256dce15fc239d34b1eb8e4 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Sat, 16 Aug 2014 23:32:12 +0800 Subject: [PATCH 0924/3726] refine doc --- docs/topics/models.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index be324ef1b0..216978cad8 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -185,13 +185,13 @@ Model Methods Sets the ttl values to run instance updates and inserts queries with. - .. method:: if_not_exists(enable_or_not) + .. method:: if_not_exists() Check the existence of an object before insertion. The existence of an object is determined by its primary key(s). And please note using this flag would incur performance cost. - This method supported on Cassandra 2.0 or later. + This method is supported on Cassandra 2.0 or later. .. method:: update(**values) From eece3131e1051241ca325a10e3b0c841886b3f64 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Sat, 16 Aug 2014 23:46:05 +0800 Subject: [PATCH 0925/3726] add note on statement ordering in batch query --- docs/topics/faq.rst | 44 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/docs/topics/faq.rst b/docs/topics/faq.rst index b250180bb2..9c3c7beb3f 100644 --- a/docs/topics/faq.rst +++ b/docs/topics/faq.rst @@ -5,4 +5,46 @@ Frequently Asked Questions Q: Why don't updates work correctly on models instantiated as Model(field=blah, field2=blah2)? ------------------------------------------------------------------- -A: The recommended way to create new rows is with the models .create method. The values passed into a model's init method are interpreted by the model as the values as they were read from a row. 
This allows the model to "know" which rows have changed since the row was read out of cassandra, and create suitable update statements. \ No newline at end of file +A: The recommended way to create new rows is with the models .create method. The values passed into a model's init method are interpreted by the model as the values as they were read from a row. This allows the model to "know" which rows have changed since the row was read out of cassandra, and create suitable update statements. + +Q: How to preserve ordering in batch query? +------------------------------------------- + +A: Statement Ordering is not supported by CQL3 batches. Therefore, +once Cassandra needs to resolve a conflict (updating the same column in one batch), +the algorithm below is used. + + * If timestamps are different, pick the column with the largest timestamp (the value being a regular column or a tombstone) + * If timestamps are the same, and one of the columns is a tombstone ('null') - pick the tombstone + * If timestamps are the same, and none of the columns are tombstones, pick the column with the largest value + +Below is an example to show this scenario. + +.. code-block:: python + + class MyModel(Model): + id = columns.Integer(primary_key=True) + count = columns.Integer() + text = columns.Text() + + with BatchQuery() as b: + MyModel.batch(b).create(id=1, count=2, text='123') + MyModel.batch(b).create(id=1, count=3, text='111') + + assert MyModel.objects(id=1).first().count == 3 + assert MyModel.objects(id=1).first().text == '123' + +The largest value of count is 3, and the largest value of text would be '123'. + +The workaround is to apply a timestamp to each statement; Cassandra will then +resolve to the statement with the latest timestamp. + +.. 
code-block:: python + + with BatchQuery() as b: + MyModel.timestamp(datetime.now()).batch(b).create(id=1, count=2, text='123') + MyModel.timestamp(datetime.now()).batch(b).create(id=1, count=3, text='111') + + assert MyModel.objects(id=1).first().count == 3 + assert MyModel.objects(id=1).first().text == '111' + From 3e6e94dcb03af7981890e2c14c8c2c0e33ca76f7 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 18 Aug 2014 09:23:42 -0700 Subject: [PATCH 0926/3726] fix for bytearray being passed to blob column --- changelog | 6 +++++- cqlengine/columns.py | 2 +- cqlengine/tests/columns/test_value_io.py | 6 ++++++ 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/changelog b/changelog index 0420badfb4..849a5f3ca4 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,9 @@ CHANGELOG +0.18.1 + + + 0.18.0 [260] support for static columns @@ -8,7 +12,7 @@ CHANGELOG [256] write docs about developing cqlengine itself [254] use CASSANDRA_VERSION in tests [252] memtable_flush_period_in_ms not in C 1.2 - guard against test failure -[251] test fails occasionally despite lists looking the same +[251] test fails occasionally despite lists looking the same [246] document Batches default behaviour [239] add sphinx contrib module to docs [237] update to cassandra-driver 2.1 diff --git a/cqlengine/columns.py b/cqlengine/columns.py index af8950230b..9297d3d880 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -221,7 +221,7 @@ class Blob(Column): def to_database(self, value): - if not isinstance(value, six.binary_type): + if not isinstance(value, (six.binary_type, bytearray)): raise Exception("expecting a binary, got a %s" % type(value)) val = super(Bytes, self).to_database(value) diff --git a/cqlengine/tests/columns/test_value_io.py b/cqlengine/tests/columns/test_value_io.py index 45bc59455f..b7587afa31 100644 --- a/cqlengine/tests/columns/test_value_io.py +++ b/cqlengine/tests/columns/test_value_io.py @@ -85,6 +85,12 @@ class TestBlobIO(BaseColumnIOTest): pkey_val 
= six.b('blake'), uuid4().bytes data_val = six.b('eggleston'), uuid4().bytes +class TestBlobIO2(BaseColumnIOTest): + + column = columns.Blob + pkey_val = bytearray(six.b('blake')), uuid4().bytes + data_val = bytearray(six.b('eggleston')), uuid4().bytes + class TestTextIO(BaseColumnIOTest): column = columns.Text From 86b6666195939e53d6ca4a9616fc6dd64a214ce1 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 18 Aug 2014 09:50:09 -0700 Subject: [PATCH 0927/3726] updated changelog --- changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog b/changelog index 849a5f3ca4..e0fdec574f 100644 --- a/changelog +++ b/changelog @@ -2,6 +2,7 @@ CHANGELOG 0.18.1 +[264] fixed support for bytearrays in blob columns 0.18.0 From 86bc6203ccdc1891cc91762c0f26da9c57fd7bf5 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Mon, 18 Aug 2014 09:50:28 -0700 Subject: [PATCH 0928/3726] bug fix bump --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 66333910a4..249afd517d 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.18.0 +0.18.1 From 921c5245231994d280a4c2362ff006524fda4c79 Mon Sep 17 00:00:00 2001 From: Andy Zhao Date: Tue, 19 Aug 2014 22:55:32 -0400 Subject: [PATCH 0929/3726] fix updating static column only exception fix updating static column only exception due to extra clustering key in where clause --- cqlengine/query.py | 7 ++- cqlengine/tests/columns/test_static_column.py | 58 +++++++++++++++++++ 2 files changed, 63 insertions(+), 2 deletions(-) create mode 100644 cqlengine/tests/columns/test_static_column.py diff --git a/cqlengine/query.py b/cqlengine/query.py index 996526022d..de929c76e0 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -826,7 +826,7 @@ def update(self): if self.instance is None: raise CQLEngineException("DML Query intance attribute is None") assert type(self.instance) == self.model - + static_update_only = True statement = 
UpdateStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp) #get defined fields and their column names for name, col in self.model._columns.items(): @@ -841,7 +841,8 @@ def update(self): # don't update something if it hasn't changed if not val_mgr.changed and not isinstance(col, Counter): continue - + + static_update_only = (static_update_only and col.static) if isinstance(col, (BaseContainerColumn, Counter)): # get appropriate clause if isinstance(col, List): klass = ListUpdateClause @@ -863,6 +864,8 @@ def update(self): if statement.get_context_size() > 0 or self.instance._has_counter: for name, col in self.model._primary_keys.items(): + if static_update_only and (not col.partition_key): + continue statement.add_where_clause(WhereClause( col.db_field_name, EqualsOperator(), diff --git a/cqlengine/tests/columns/test_static_column.py b/cqlengine/tests/columns/test_static_column.py new file mode 100644 index 0000000000..3cf9e97273 --- /dev/null +++ b/cqlengine/tests/columns/test_static_column.py @@ -0,0 +1,58 @@ +from uuid import uuid4 + +from cqlengine import Model +from cqlengine import columns +from cqlengine.management import sync_table, drop_table +from cqlengine.models import ModelDefinitionException +from cqlengine.tests.base import BaseCassEngTestCase + + +class TestStaticModel(Model): + __keyspace__ = 'test' + partition = columns.UUID(primary_key=True, default=uuid4) + cluster = columns.UUID(primary_key=True, default=uuid4) + static = columns.Text(static=True) + text = columns.Text() + + +class TestStaticColumn(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestStaticColumn, cls).setUpClass() + drop_table(TestStaticModel) + sync_table(TestStaticModel) + + @classmethod + def tearDownClass(cls): + super(TestStaticColumn, cls).tearDownClass() + drop_table(TestStaticModel) + + def test_mixed_updates(self): + """ Tests that updates on both static and non-static columns work as intended """ + instance = 
TestStaticModel.create() + instance.static = "it's shared" + instance.text = "some text" + instance.save() + + u = TestStaticModel.get(partition=instance.partition) + u.static = "it's still shared" + u.text = "another text" + u.update() + actual = TestStaticModel.get(partition=u.partition) + + assert actual.static == "it's still shared" + + def test_static_only_updates(self): + """ Tests that updates on static only column work as intended """ + instance = TestStaticModel.create() + instance.static = "it's shared" + instance.text = "some text" + instance.save() + + u = TestStaticModel.get(partition=instance.partition) + u.static = "it's still shared" + u.update() + actual = TestStaticModel.get(partition=u.partition) + assert actual.static == "it's still shared" + From d986b0f7e6360ff83cf1a01d928bc10392e9d8cb Mon Sep 17 00:00:00 2001 From: andy8zhao Date: Thu, 21 Aug 2014 16:25:24 -0400 Subject: [PATCH 0930/3726] skip test if protocol version < 2 Skip static column only update unit test if cassandra protocol version < 2 --- cqlengine/tests/columns/test_static_column.py | 6 +++++- cqlout | 0 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 cqlout diff --git a/cqlengine/tests/columns/test_static_column.py b/cqlengine/tests/columns/test_static_column.py index 3cf9e97273..3d91564745 100644 --- a/cqlengine/tests/columns/test_static_column.py +++ b/cqlengine/tests/columns/test_static_column.py @@ -1,11 +1,13 @@ from uuid import uuid4 - +from unittest import skipUnless from cqlengine import Model from cqlengine import columns from cqlengine.management import sync_table, drop_table from cqlengine.models import ModelDefinitionException from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.connection import get_cluster +cluster = get_cluster() class TestStaticModel(Model): __keyspace__ = 'test' @@ -28,6 +30,7 @@ def tearDownClass(cls): super(TestStaticColumn, cls).tearDownClass() drop_table(TestStaticModel) + 
@skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") def test_mixed_updates(self): """ Tests that updates on both static and non-static columns work as intended """ instance = TestStaticModel.create() @@ -43,6 +46,7 @@ def test_mixed_updates(self): assert actual.static == "it's still shared" + @skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") def test_static_only_updates(self): """ Tests that updates on static only column work as intended """ instance = TestStaticModel.create() diff --git a/cqlout b/cqlout new file mode 100644 index 0000000000..e69de29bb2 From 68c06ae304ae2895dcfbec935a9129201a914807 Mon Sep 17 00:00:00 2001 From: andy8zhao Date: Thu, 21 Aug 2014 16:32:02 -0400 Subject: [PATCH 0931/3726] remove unwanted file remove unwanted file --- cqlout | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 cqlout diff --git a/cqlout b/cqlout deleted file mode 100644 index e69de29bb2..0000000000 From ad318166b2154efba84b7c2285b9a905b8197c3e Mon Sep 17 00:00:00 2001 From: andy8zhao Date: Thu, 21 Aug 2014 17:03:52 -0400 Subject: [PATCH 0932/3726] skip test if protocol < 2 skip test if protocol < 2 --- cqlengine/tests/columns/test_static_column.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cqlengine/tests/columns/test_static_column.py b/cqlengine/tests/columns/test_static_column.py index 3d91564745..34c553dc89 100644 --- a/cqlengine/tests/columns/test_static_column.py +++ b/cqlengine/tests/columns/test_static_column.py @@ -4,7 +4,7 @@ from cqlengine import columns from cqlengine.management import sync_table, drop_table from cqlengine.models import ModelDefinitionException -from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.tests.base import BaseCassEngTestCase, CASSANDRA_VERSION from cqlengine.connection import get_cluster cluster = get_cluster() @@ -23,7 +23,8 @@ class TestStaticColumn(BaseCassEngTestCase): def setUpClass(cls): 
super(TestStaticColumn, cls).setUpClass() drop_table(TestStaticModel) - sync_table(TestStaticModel) + if CASSANDRA_VERSION >= 20: + sync_table(TestStaticModel) @classmethod def tearDownClass(cls): From 0319cfc23d88add5f2e3c6807788c2bdb7200bfd Mon Sep 17 00:00:00 2001 From: andy8zhao Date: Fri, 22 Aug 2014 00:04:07 -0400 Subject: [PATCH 0933/3726] fix static container column sync_table bug For static container column, "static" keyword is missing from cql "create table" statement. --- cqlengine/columns.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 9297d3d880..056853ab98 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -545,9 +545,9 @@ def get_column_def(self): """ Returns a column definition for CQL table definition """ + static = "static" if self.static else "" db_type = self.db_type.format(self.value_type.db_type) - return '{} {}'.format(self.cql, db_type) - + return '{} {} {}'.format(self.cql, db_type, static) def _val_is_null(self, val): return not val @@ -703,7 +703,8 @@ def get_column_def(self): self.key_type.db_type, self.value_type.db_type ) - return '{} {}'.format(self.cql, db_type) + static = "static" if self.static else "" + return '{} {} {}'.format(self.cql, db_type, static) def validate(self, value): val = super(Map, self).validate(value) From 3f39854e8718d654e345b73481aa6c26bb7fa635 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Wed, 27 Aug 2014 10:52:14 +0200 Subject: [PATCH 0934/3726] Ignore iPython left-overs A `Untitled0.ipynb` file was left in the [cqlengine-0.18.1.tar.gz](https://pypi.python.org/packages/source/c/cqlengine/cqlengine-0.18.1.tar.gz#md5=a9cb831315c8a5f2e370a7d32fc77cb5) archive on PyPi. 
--- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 314eec67f7..b43bb3d158 100644 --- a/.gitignore +++ b/.gitignore @@ -42,3 +42,6 @@ html/ /data docs/_build + +#iPython +*.ipynb From babe153327dbac79a1c1d32c7b948e5728f75930 Mon Sep 17 00:00:00 2001 From: Gigi Date: Thu, 28 Aug 2014 11:25:55 -0700 Subject: [PATCH 0935/3726] Fix multiple bugs related to control connection timeout and schema refresh. 1. The Cluster's class-level 'control_connection_timeout' variable was never used 2. ControlConnection.wait_for_schema_agreement() ignored the timeout passed from Cluster and always used hard-coded 2.0 3. Still wait_for_schema_agreement() if an OperationTimeoutError occurred there was no sleep between attempts, which caused test_refresh_schema_timeout() to hang because it used FakeTime and no FakeTime ever elapsed. It is also very tight busy waiting, which is sub-optimal. My changes are: 1. Use the Cluster's class level 'control_connection_timeout' as the default for the __init__() argument 2. In wait_for_schema_agreement() use the self._timeout (passed from Cluster) to compute the actual timeout 3. Added poll_interval = 0.2, and sleep when an operation times out before attempting again (fixes the test and makes the busy waiting more relaxed) --- cassandra/cluster.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index fa5defd1ac..2183230514 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -353,9 +353,10 @@ def auth_provider(self, value): control_connection_timeout = 2.0 """ - A timeout, in seconds, for queries made by the control connection, such - as querying the current schema and information about nodes in the cluster. - If set to :const:`None`, there will be no timeout for these queries. 
+ A default timeout, in seconds, for queries made by the control connection, + such as querying the current schema and information about nodes in the + cluster. If set to :const:`None`, there will be no timeout for these + queries. Can be overridden by passing it to __init__() """ sessions = None @@ -392,7 +393,7 @@ def __init__(self, protocol_version=2, executor_threads=2, max_schema_agreement_wait=10, - control_connection_timeout=2.0): + control_connection_timeout=control_connection_timeout): """ Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. @@ -2117,6 +2118,7 @@ def wait_for_schema_agreement(self, connection=None, preloaded_results=None): log.debug("[control connection] Waiting for schema agreement") start = self._time.time() elapsed = 0 + poll_interval = 0.2 cl = ConsistencyLevel.ONE total_timeout = self._cluster.max_schema_agreement_wait schema_mismatches = None @@ -2124,12 +2126,13 @@ def wait_for_schema_agreement(self, connection=None, preloaded_results=None): peers_query = QueryMessage(query=self._SELECT_SCHEMA_PEERS, consistency_level=cl) local_query = QueryMessage(query=self._SELECT_SCHEMA_LOCAL, consistency_level=cl) try: - timeout = min(2.0, total_timeout - elapsed) + timeout = min(self._timeout, total_timeout - elapsed) peers_result, local_result = connection.wait_for_responses( peers_query, local_query, timeout=timeout) except OperationTimedOut as timeout: log.debug("[control connection] Timed out waiting for " "response during schema agreement check: %s", timeout) + self._time.sleep(poll_interval) elapsed = self._time.time() - start continue except ConnectionShutdown: @@ -2144,7 +2147,7 @@ def wait_for_schema_agreement(self, connection=None, preloaded_results=None): return True log.debug("[control connection] Schemas mismatched, trying again") - self._time.sleep(0.2) + self._time.sleep(poll_interval) elapsed = self._time.time() - start log.warn("Node %s is reporting a schema disagreement: %s", From 
bac0673eaf665973ea438fc18b1ec1136dc22323 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Tue, 1 Jul 2014 18:48:58 +0800 Subject: [PATCH 0936/3726] 'IF NOT EXISTS' for insert statement The usage is just like 'ttl'. --- cqlengine/models.py | 24 +++++- cqlengine/query.py | 15 +++- cqlengine/statements.py | 21 +++++ cqlengine/tests/test_checkexist.py | 129 +++++++++++++++++++++++++++++ 4 files changed, 184 insertions(+), 5 deletions(-) create mode 100644 cqlengine/tests/test_checkexist.py diff --git a/cqlengine/models.py b/cqlengine/models.py index d725e42c9f..7748ca76a3 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -116,6 +116,23 @@ def timestamp_setter(ts): def __call__(self, *args, **kwargs): raise NotImplementedError +class CheckExistDescriptor(object): + """ + return a query set descriptor with a check-exist flag specified + """ + def __get__(self, instance, model): + if instance: + # instance method + def checkexist_setter(ce): + instance._check_exist = ce + return instance + return checkexist_setter + + return model.objects.check_exist + + def __call__(self, *args, **kwargs): + raise NotImplementedError + class ConsistencyDescriptor(object): """ returns a query set descriptor if called on Class, instance if it was an instance call @@ -225,6 +242,8 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass # custom timestamps, see USING TIMESTAMP X timestamp = TimestampDescriptor() + + check_exist = CheckExistDescriptor() # _len is lazily created by __len__ @@ -276,6 +295,8 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass _timestamp = None # optional timestamp to include with the operation (USING TIMESTAMP) + _check_exist = False # optional check_exist flag to check existence before insertion + def __init__(self, **values): self._values = {} self._ttl = self.__default_ttl__ @@ -528,7 +549,8 @@ def save(self): batch=self._batch, ttl=self._ttl, timestamp=self._timestamp, - consistency=self.__consistency__).save() + 
consistency=self.__consistency__, + check_exist=self._check_exist).save() #reset the value managers for v in self._values.values(): diff --git a/cqlengine/query.py b/cqlengine/query.py index de929c76e0..86c8dcef11 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -220,6 +220,7 @@ def __init__(self, model): self._ttl = getattr(model, '__default_ttl__', None) self._consistency = None self._timestamp = None + self._check_exist = False @property def column_family_name(self): @@ -569,7 +570,7 @@ def defer(self, fields): def create(self, **kwargs): return self.model(**kwargs).batch(self._batch).ttl(self._ttl).\ - consistency(self._consistency).\ + consistency(self._consistency).check_exist(self._check_exist).\ timestamp(self._timestamp).save() def delete(self): @@ -708,6 +709,11 @@ def timestamp(self, timestamp): clone._timestamp = timestamp return clone + def check_exist(self, check_exist): + clone = copy.deepcopy(self) + clone._check_exist = check_exist + return clone + def update(self, **values): """ Updates the rows in this queryset """ if not values: @@ -767,8 +773,9 @@ class DMLQuery(object): _ttl = None _consistency = None _timestamp = None + _check_exist = False - def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None): + def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None, check_exist=False): self.model = model self.column_family_name = self.model.column_family_name() self.instance = instance @@ -776,6 +783,7 @@ def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, self._ttl = ttl self._consistency = consistency self._timestamp = timestamp + self._check_exist = check_exist def _execute(self, q): if self._batch: @@ -890,7 +898,7 @@ def save(self): if self.instance._has_counter or self.instance._can_update(): return self.update() else: - insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp) + insert = 
InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, check_exist=self._check_exist) for name, col in self.instance._columns.items(): val = getattr(self.instance, name, None) if col._val_is_null(val): @@ -906,7 +914,6 @@ def save(self): # caused by pointless update queries if not insert.is_empty: self._execute(insert) - # delete any nulled columns self._delete_null_columns() diff --git a/cqlengine/statements.py b/cqlengine/statements.py index f1681a1658..c357f07ea1 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -619,6 +619,24 @@ def get_context(self): class InsertStatement(AssignmentStatement): """ an cql insert select statement """ + def __init__(self, + table, + assignments=None, + consistency=None, + where=None, + ttl=None, + timestamp=None, + check_exist=False): + super(InsertStatement, self).__init__( + table, + assignments=assignments, + consistency=consistency, + where=where, + ttl=ttl, + timestamp=timestamp) + + self.check_exist = check_exist + def add_where_clause(self, clause): raise StatementException("Cannot add where clauses to insert statements") @@ -633,6 +651,9 @@ def __unicode__(self): qs += ['VALUES'] qs += ["({})".format(', '.join(['%({})s'.format(v) for v in values]))] + if self.check_exist: + qs += ["IF NOT EXISTS"] + if self.ttl: qs += ["USING TTL {}".format(self.ttl)] diff --git a/cqlengine/tests/test_checkexist.py b/cqlengine/tests/test_checkexist.py new file mode 100644 index 0000000000..7797459e62 --- /dev/null +++ b/cqlengine/tests/test_checkexist.py @@ -0,0 +1,129 @@ +from cqlengine.management import sync_table, drop_table, create_keyspace, delete_keyspace +from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.models import Model +from cqlengine import columns +from uuid import uuid4 +import mock +from cqlengine.connection import get_session + + +class TestChechExistModel(Model): + + __keyspace__ = 'cqlengine_test_checkexist' + + id = columns.UUID(primary_key=True, 
default=lambda:uuid4()) + count = columns.Integer() + text = columns.Text(required=False) + + +class BaseCheckExistTest(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(BaseCheckExistTest, cls).setUpClass() + """ + when receiving an insert statement with 'if not exist', cassandra would + perform a read with QUORUM level. Unittest would be failed if replica_factor + is 3 and one node only. Therefore I have create a new keyspace with + replica_factor:1. + """ + create_keyspace(TestChechExistModel.__keyspace__, replication_factor=1) + sync_table(TestChechExistModel) + + @classmethod + def tearDownClass(cls): + super(BaseCassEngTestCase, cls).tearDownClass() + drop_table(TestChechExistModel) + delete_keyspace(TestChechExistModel.__keyspace__) + + +class CheckExistInsertTests(BaseCheckExistTest): + + def test_insert_check_exist_success(self): + """ tests that insertion with check_exist work as expected """ + id = uuid4() + + TestChechExistModel.create(id=id, count=8, text='123456789') + TestChechExistModel.check_exist(True).create(id=id, count=9, text='111111111111') + + q = TestChechExistModel.objects(id=id) + self.assertEqual(len(q), 1) + + tm = q.first() + self.assertEquals(tm.count, 8) + self.assertEquals(tm.text, '123456789') + + def test_insert_check_exist_failure(self): + """ tests that insertion with check_exist failure """ + id = uuid4() + + TestChechExistModel.create(id=id, count=8, text='123456789') + TestChechExistModel.check_exist(False).create(id=id, count=9, text='111111111111') + + q = TestChechExistModel.objects(id=id) + self.assertEquals(len(q), 1) + + tm = q.first() + self.assertEquals(tm.count, 9) + self.assertEquals(tm.text, '111111111111') + + +class CheckExistModelTest(BaseCheckExistTest): + + def test_check_exist_included_on_create(self): + """ tests that check_exist on models works as expected """ + + session = get_session() + + with mock.patch.object(session, 'execute') as m: + 
TestChechExistModel.check_exist(True).create(count=8) + + query = m.call_args[0][0].query_string + self.assertIn("IF NOT EXISTS", query) + + def test_check_exist_included_on_save(self): + + session = get_session() + + with mock.patch.object(session, 'execute') as m: + tm = TestChechExistModel(count=8) + tm.check_exist(True).save() + + query = m.call_args[0][0].query_string + self.assertIn("IF NOT EXISTS", query) + + def test_queryset_is_returned_on_class(self): + """ ensure we get a queryset description back """ + qs = TestChechExistModel.check_exist(True) + self.assertTrue(isinstance(qs, TestChechExistModel.__queryset__), type(qs)) + + +class CheckExistInstanceTest(BaseCheckExistTest): + + def test_instance_is_returned(self): + """ + ensures that we properly handle the instance.check_exist(True).save() + scenario + """ + o = TestChechExistModel.create(text="whatever") + o.text = "new stuff" + o = o.check_exist(True) + self.assertEqual(True, o._check_exist) + + def test_check_exist_is_not_include_with_query_on_update(self): + """ + make sure we don't put 'IF NOT EXIST' in update statements + """ + session = get_session() + + o = TestChechExistModel.create(text="whatever") + o.text = "new stuff" + o = o.check_exist(True) + + with mock.patch.object(session, 'execute') as m: + o.save() + + query = m.call_args[0][0].query_string + self.assertNotIn("IF NOT EXIST", query) + + From b4f8e3ac8fefacd7e48932764cdf13426fb3a3b7 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Wed, 2 Jul 2014 12:53:22 +0800 Subject: [PATCH 0937/3726] rename all 'check_exist' related things to 'if_not_exists' --- cqlengine/models.py | 18 ++--- cqlengine/query.py | 16 ++--- cqlengine/statements.py | 6 +- ...test_checkexist.py => test_ifnotexists.py} | 72 +++++++++---------- 4 files changed, 56 insertions(+), 56 deletions(-) rename cqlengine/tests/{test_checkexist.py => test_ifnotexists.py} (50%) diff --git a/cqlengine/models.py b/cqlengine/models.py index 7748ca76a3..7781242685 100644 --- 
a/cqlengine/models.py +++ b/cqlengine/models.py @@ -116,19 +116,19 @@ def timestamp_setter(ts): def __call__(self, *args, **kwargs): raise NotImplementedError -class CheckExistDescriptor(object): +class IfNotExistsDescriptor(object): """ - return a query set descriptor with a check-exist flag specified + return a query set descriptor with a if_not_exists flag specified """ def __get__(self, instance, model): if instance: # instance method - def checkexist_setter(ce): - instance._check_exist = ce + def ifnotexists_setter(ife): + instance._if_not_exists = ife return instance - return checkexist_setter + return ifnotexists_setter - return model.objects.check_exist + return model.objects.if_not_exists def __call__(self, *args, **kwargs): raise NotImplementedError @@ -243,7 +243,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass # custom timestamps, see USING TIMESTAMP X timestamp = TimestampDescriptor() - check_exist = CheckExistDescriptor() + if_not_exists = IfNotExistsDescriptor() # _len is lazily created by __len__ @@ -295,7 +295,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass _timestamp = None # optional timestamp to include with the operation (USING TIMESTAMP) - _check_exist = False # optional check_exist flag to check existence before insertion + _if_not_exists = False # optional if_not_exists flag to check existence before insertion def __init__(self, **values): self._values = {} @@ -550,7 +550,7 @@ def save(self): ttl=self._ttl, timestamp=self._timestamp, consistency=self.__consistency__, - check_exist=self._check_exist).save() + if_not_exists=self._if_not_exists).save() #reset the value managers for v in self._values.values(): diff --git a/cqlengine/query.py b/cqlengine/query.py index 86c8dcef11..c1ff531c10 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -220,7 +220,7 @@ def __init__(self, model): self._ttl = getattr(model, '__default_ttl__', None) self._consistency = None self._timestamp = None - 
self._check_exist = False + self._if_not_exists = False @property def column_family_name(self): @@ -570,7 +570,7 @@ def defer(self, fields): def create(self, **kwargs): return self.model(**kwargs).batch(self._batch).ttl(self._ttl).\ - consistency(self._consistency).check_exist(self._check_exist).\ + consistency(self._consistency).if_not_exists(self._if_not_exists).\ timestamp(self._timestamp).save() def delete(self): @@ -709,9 +709,9 @@ def timestamp(self, timestamp): clone._timestamp = timestamp return clone - def check_exist(self, check_exist): + def if_not_exists(self, if_not_exists): clone = copy.deepcopy(self) - clone._check_exist = check_exist + clone._if_not_exists = if_not_exists return clone def update(self, **values): @@ -773,9 +773,9 @@ class DMLQuery(object): _ttl = None _consistency = None _timestamp = None - _check_exist = False + _if_not_exists = False - def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None, check_exist=False): + def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None, if_not_exists=False): self.model = model self.column_family_name = self.model.column_family_name() self.instance = instance @@ -783,7 +783,7 @@ def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, self._ttl = ttl self._consistency = consistency self._timestamp = timestamp - self._check_exist = check_exist + self._if_not_exists = if_not_exists def _execute(self, q): if self._batch: @@ -898,7 +898,7 @@ def save(self): if self.instance._has_counter or self.instance._can_update(): return self.update() else: - insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, check_exist=self._check_exist) + insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, if_not_exists=self._if_not_exists) for name, col in self.instance._columns.items(): val = getattr(self.instance, name, None) if 
col._val_is_null(val): diff --git a/cqlengine/statements.py b/cqlengine/statements.py index c357f07ea1..a6cdf4b3e7 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -626,7 +626,7 @@ def __init__(self, where=None, ttl=None, timestamp=None, - check_exist=False): + if_not_exists=False): super(InsertStatement, self).__init__( table, assignments=assignments, @@ -635,7 +635,7 @@ def __init__(self, ttl=ttl, timestamp=timestamp) - self.check_exist = check_exist + self.if_not_exists = if_not_exists def add_where_clause(self, clause): raise StatementException("Cannot add where clauses to insert statements") @@ -651,7 +651,7 @@ def __unicode__(self): qs += ['VALUES'] qs += ["({})".format(', '.join(['%({})s'.format(v) for v in values]))] - if self.check_exist: + if self.if_not_exists: qs += ["IF NOT EXISTS"] if self.ttl: diff --git a/cqlengine/tests/test_checkexist.py b/cqlengine/tests/test_ifnotexists.py similarity index 50% rename from cqlengine/tests/test_checkexist.py rename to cqlengine/tests/test_ifnotexists.py index 7797459e62..bca737be6e 100644 --- a/cqlengine/tests/test_checkexist.py +++ b/cqlengine/tests/test_ifnotexists.py @@ -7,60 +7,60 @@ from cqlengine.connection import get_session -class TestChechExistModel(Model): +class TestIfNotExistsModel(Model): - __keyspace__ = 'cqlengine_test_checkexist' + __keyspace__ = 'cqlengine_test_ifnotexists' id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() text = columns.Text(required=False) -class BaseCheckExistTest(BaseCassEngTestCase): +class BaseIfNotExistsTest(BaseCassEngTestCase): @classmethod def setUpClass(cls): - super(BaseCheckExistTest, cls).setUpClass() + super(BaseIfNotExistsTest, cls).setUpClass() """ when receiving an insert statement with 'if not exist', cassandra would perform a read with QUORUM level. Unittest would be failed if replica_factor is 3 and one node only. Therefore I have create a new keyspace with replica_factor:1. 
""" - create_keyspace(TestChechExistModel.__keyspace__, replication_factor=1) - sync_table(TestChechExistModel) + create_keyspace(TestIfNotExistsModel.__keyspace__, replication_factor=1) + sync_table(TestIfNotExistsModel) @classmethod def tearDownClass(cls): super(BaseCassEngTestCase, cls).tearDownClass() - drop_table(TestChechExistModel) - delete_keyspace(TestChechExistModel.__keyspace__) + drop_table(TestIfNotExistsModel) + delete_keyspace(TestIfNotExistsModel.__keyspace__) -class CheckExistInsertTests(BaseCheckExistTest): +class IfNotExistsInsertTests(BaseIfNotExistsTest): - def test_insert_check_exist_success(self): - """ tests that insertion with check_exist work as expected """ + def test_insert_if_not_exists_success(self): + """ tests that insertion with if_not_exists work as expected """ id = uuid4() - TestChechExistModel.create(id=id, count=8, text='123456789') - TestChechExistModel.check_exist(True).create(id=id, count=9, text='111111111111') + TestIfNotExistsModel.create(id=id, count=8, text='123456789') + TestIfNotExistsModel.if_not_exists(True).create(id=id, count=9, text='111111111111') - q = TestChechExistModel.objects(id=id) + q = TestIfNotExistsModel.objects(id=id) self.assertEqual(len(q), 1) tm = q.first() self.assertEquals(tm.count, 8) self.assertEquals(tm.text, '123456789') - def test_insert_check_exist_failure(self): - """ tests that insertion with check_exist failure """ + def test_insert_if_not_exists_failure(self): + """ tests that insertion with if_not_exists failure """ id = uuid4() - TestChechExistModel.create(id=id, count=8, text='123456789') - TestChechExistModel.check_exist(False).create(id=id, count=9, text='111111111111') + TestIfNotExistsModel.create(id=id, count=8, text='123456789') + TestIfNotExistsModel.if_not_exists(False).create(id=id, count=9, text='111111111111') - q = TestChechExistModel.objects(id=id) + q = TestIfNotExistsModel.objects(id=id) self.assertEquals(len(q), 1) tm = q.first() @@ -68,57 +68,57 @@ def 
test_insert_check_exist_failure(self): self.assertEquals(tm.text, '111111111111') -class CheckExistModelTest(BaseCheckExistTest): +class IfNotExistsModelTest(BaseIfNotExistsTest): - def test_check_exist_included_on_create(self): - """ tests that check_exist on models works as expected """ + def test_if_not_exists_included_on_create(self): + """ tests that if_not_exists on models works as expected """ session = get_session() with mock.patch.object(session, 'execute') as m: - TestChechExistModel.check_exist(True).create(count=8) + TestIfNotExistsModel.if_not_exists(True).create(count=8) query = m.call_args[0][0].query_string self.assertIn("IF NOT EXISTS", query) - def test_check_exist_included_on_save(self): + def test_if_not_exists_included_on_save(self): session = get_session() with mock.patch.object(session, 'execute') as m: - tm = TestChechExistModel(count=8) - tm.check_exist(True).save() + tm = TestIfNotExistsModel(count=8) + tm.if_not_exists(True).save() query = m.call_args[0][0].query_string self.assertIn("IF NOT EXISTS", query) def test_queryset_is_returned_on_class(self): """ ensure we get a queryset description back """ - qs = TestChechExistModel.check_exist(True) - self.assertTrue(isinstance(qs, TestChechExistModel.__queryset__), type(qs)) + qs = TestIfNotExistsModel.if_not_exists(True) + self.assertTrue(isinstance(qs, TestIfNotExistsModel.__queryset__), type(qs)) -class CheckExistInstanceTest(BaseCheckExistTest): +class IfNotExistsInstanceTest(BaseIfNotExistsTest): def test_instance_is_returned(self): """ - ensures that we properly handle the instance.check_exist(True).save() + ensures that we properly handle the instance.if_not_exists(True).save() scenario """ - o = TestChechExistModel.create(text="whatever") + o = TestIfNotExistsModel.create(text="whatever") o.text = "new stuff" - o = o.check_exist(True) - self.assertEqual(True, o._check_exist) + o = o.if_not_exists(True) + self.assertEqual(True, o._if_not_exists) - def 
test_check_exist_is_not_include_with_query_on_update(self): + def test_if_not_exists_is_not_include_with_query_on_update(self): """ make sure we don't put 'IF NOT EXIST' in update statements """ session = get_session() - o = TestChechExistModel.create(text="whatever") + o = TestIfNotExistsModel.create(text="whatever") o.text = "new stuff" - o = o.check_exist(True) + o = o.if_not_exists(True) with mock.patch.object(session, 'execute') as m: o.save() From df4eb5dc4f1b79e34d0f280764eb05868998e8c1 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Tue, 1 Jul 2014 18:48:58 +0800 Subject: [PATCH 0938/3726] 'IF NOT EXISTS' for insert statement The usage is just like 'ttl'. --- cqlengine/tests/test_checkexist.py | 129 +++++++++++++++++++++++++++++ 1 file changed, 129 insertions(+) create mode 100644 cqlengine/tests/test_checkexist.py diff --git a/cqlengine/tests/test_checkexist.py b/cqlengine/tests/test_checkexist.py new file mode 100644 index 0000000000..7797459e62 --- /dev/null +++ b/cqlengine/tests/test_checkexist.py @@ -0,0 +1,129 @@ +from cqlengine.management import sync_table, drop_table, create_keyspace, delete_keyspace +from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.models import Model +from cqlengine import columns +from uuid import uuid4 +import mock +from cqlengine.connection import get_session + + +class TestChechExistModel(Model): + + __keyspace__ = 'cqlengine_test_checkexist' + + id = columns.UUID(primary_key=True, default=lambda:uuid4()) + count = columns.Integer() + text = columns.Text(required=False) + + +class BaseCheckExistTest(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(BaseCheckExistTest, cls).setUpClass() + """ + when receiving an insert statement with 'if not exist', cassandra would + perform a read with QUORUM level. Unittest would be failed if replica_factor + is 3 and one node only. Therefore I have create a new keyspace with + replica_factor:1. 
+ """ + create_keyspace(TestChechExistModel.__keyspace__, replication_factor=1) + sync_table(TestChechExistModel) + + @classmethod + def tearDownClass(cls): + super(BaseCassEngTestCase, cls).tearDownClass() + drop_table(TestChechExistModel) + delete_keyspace(TestChechExistModel.__keyspace__) + + +class CheckExistInsertTests(BaseCheckExistTest): + + def test_insert_check_exist_success(self): + """ tests that insertion with check_exist work as expected """ + id = uuid4() + + TestChechExistModel.create(id=id, count=8, text='123456789') + TestChechExistModel.check_exist(True).create(id=id, count=9, text='111111111111') + + q = TestChechExistModel.objects(id=id) + self.assertEqual(len(q), 1) + + tm = q.first() + self.assertEquals(tm.count, 8) + self.assertEquals(tm.text, '123456789') + + def test_insert_check_exist_failure(self): + """ tests that insertion with check_exist failure """ + id = uuid4() + + TestChechExistModel.create(id=id, count=8, text='123456789') + TestChechExistModel.check_exist(False).create(id=id, count=9, text='111111111111') + + q = TestChechExistModel.objects(id=id) + self.assertEquals(len(q), 1) + + tm = q.first() + self.assertEquals(tm.count, 9) + self.assertEquals(tm.text, '111111111111') + + +class CheckExistModelTest(BaseCheckExistTest): + + def test_check_exist_included_on_create(self): + """ tests that check_exist on models works as expected """ + + session = get_session() + + with mock.patch.object(session, 'execute') as m: + TestChechExistModel.check_exist(True).create(count=8) + + query = m.call_args[0][0].query_string + self.assertIn("IF NOT EXISTS", query) + + def test_check_exist_included_on_save(self): + + session = get_session() + + with mock.patch.object(session, 'execute') as m: + tm = TestChechExistModel(count=8) + tm.check_exist(True).save() + + query = m.call_args[0][0].query_string + self.assertIn("IF NOT EXISTS", query) + + def test_queryset_is_returned_on_class(self): + """ ensure we get a queryset description back """ + qs 
= TestChechExistModel.check_exist(True) + self.assertTrue(isinstance(qs, TestChechExistModel.__queryset__), type(qs)) + + +class CheckExistInstanceTest(BaseCheckExistTest): + + def test_instance_is_returned(self): + """ + ensures that we properly handle the instance.check_exist(True).save() + scenario + """ + o = TestChechExistModel.create(text="whatever") + o.text = "new stuff" + o = o.check_exist(True) + self.assertEqual(True, o._check_exist) + + def test_check_exist_is_not_include_with_query_on_update(self): + """ + make sure we don't put 'IF NOT EXIST' in update statements + """ + session = get_session() + + o = TestChechExistModel.create(text="whatever") + o.text = "new stuff" + o = o.check_exist(True) + + with mock.patch.object(session, 'execute') as m: + o.save() + + query = m.call_args[0][0].query_string + self.assertNotIn("IF NOT EXIST", query) + + From 400c2174d955d06353779b713528c40bc0e59c00 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Wed, 2 Jul 2014 12:53:22 +0800 Subject: [PATCH 0939/3726] rename all 'check_exist' related things to 'if_not_exists' --- cqlengine/tests/test_checkexist.py | 129 ----------------------------- 1 file changed, 129 deletions(-) delete mode 100644 cqlengine/tests/test_checkexist.py diff --git a/cqlengine/tests/test_checkexist.py b/cqlengine/tests/test_checkexist.py deleted file mode 100644 index 7797459e62..0000000000 --- a/cqlengine/tests/test_checkexist.py +++ /dev/null @@ -1,129 +0,0 @@ -from cqlengine.management import sync_table, drop_table, create_keyspace, delete_keyspace -from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.models import Model -from cqlengine import columns -from uuid import uuid4 -import mock -from cqlengine.connection import get_session - - -class TestChechExistModel(Model): - - __keyspace__ = 'cqlengine_test_checkexist' - - id = columns.UUID(primary_key=True, default=lambda:uuid4()) - count = columns.Integer() - text = columns.Text(required=False) - - -class 
BaseCheckExistTest(BaseCassEngTestCase): - - @classmethod - def setUpClass(cls): - super(BaseCheckExistTest, cls).setUpClass() - """ - when receiving an insert statement with 'if not exist', cassandra would - perform a read with QUORUM level. Unittest would be failed if replica_factor - is 3 and one node only. Therefore I have create a new keyspace with - replica_factor:1. - """ - create_keyspace(TestChechExistModel.__keyspace__, replication_factor=1) - sync_table(TestChechExistModel) - - @classmethod - def tearDownClass(cls): - super(BaseCassEngTestCase, cls).tearDownClass() - drop_table(TestChechExistModel) - delete_keyspace(TestChechExistModel.__keyspace__) - - -class CheckExistInsertTests(BaseCheckExistTest): - - def test_insert_check_exist_success(self): - """ tests that insertion with check_exist work as expected """ - id = uuid4() - - TestChechExistModel.create(id=id, count=8, text='123456789') - TestChechExistModel.check_exist(True).create(id=id, count=9, text='111111111111') - - q = TestChechExistModel.objects(id=id) - self.assertEqual(len(q), 1) - - tm = q.first() - self.assertEquals(tm.count, 8) - self.assertEquals(tm.text, '123456789') - - def test_insert_check_exist_failure(self): - """ tests that insertion with check_exist failure """ - id = uuid4() - - TestChechExistModel.create(id=id, count=8, text='123456789') - TestChechExistModel.check_exist(False).create(id=id, count=9, text='111111111111') - - q = TestChechExistModel.objects(id=id) - self.assertEquals(len(q), 1) - - tm = q.first() - self.assertEquals(tm.count, 9) - self.assertEquals(tm.text, '111111111111') - - -class CheckExistModelTest(BaseCheckExistTest): - - def test_check_exist_included_on_create(self): - """ tests that check_exist on models works as expected """ - - session = get_session() - - with mock.patch.object(session, 'execute') as m: - TestChechExistModel.check_exist(True).create(count=8) - - query = m.call_args[0][0].query_string - self.assertIn("IF NOT EXISTS", query) - - def 
test_check_exist_included_on_save(self): - - session = get_session() - - with mock.patch.object(session, 'execute') as m: - tm = TestChechExistModel(count=8) - tm.check_exist(True).save() - - query = m.call_args[0][0].query_string - self.assertIn("IF NOT EXISTS", query) - - def test_queryset_is_returned_on_class(self): - """ ensure we get a queryset description back """ - qs = TestChechExistModel.check_exist(True) - self.assertTrue(isinstance(qs, TestChechExistModel.__queryset__), type(qs)) - - -class CheckExistInstanceTest(BaseCheckExistTest): - - def test_instance_is_returned(self): - """ - ensures that we properly handle the instance.check_exist(True).save() - scenario - """ - o = TestChechExistModel.create(text="whatever") - o.text = "new stuff" - o = o.check_exist(True) - self.assertEqual(True, o._check_exist) - - def test_check_exist_is_not_include_with_query_on_update(self): - """ - make sure we don't put 'IF NOT EXIST' in update statements - """ - session = get_session() - - o = TestChechExistModel.create(text="whatever") - o.text = "new stuff" - o = o.check_exist(True) - - with mock.patch.object(session, 'execute') as m: - o.save() - - query = m.call_args[0][0].query_string - self.assertNotIn("IF NOT EXIST", query) - - From 1be4e1a3d0810e0aab28f3ba2cb1c74c8bfda229 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Thu, 3 Jul 2014 14:41:37 +0800 Subject: [PATCH 0940/3726] update docs --- docs/topics/models.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index f2e328d2ef..982f458c96 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -188,6 +188,12 @@ Model Methods Sets the ttl values to run instance updates and inserts queries with. + .. method:: if_not_exists(enable_or_not) + + Check the existence of an object before insertion. The existence of an + object is determined by its primary key(s). And please note using this flag + would incur performance cost. + .. 
method:: update(**values) Performs an update on the model instance. You can pass in values to set on the model From bd97775b2b71b7ed29c9f6576eb660efc6379176 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Fri, 15 Aug 2014 11:48:31 +0800 Subject: [PATCH 0941/3726] if not exist only supported by cassandra 2.0 or later. --- cqlengine/tests/test_ifnotexists.py | 39 ++++++++++++++++------------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/cqlengine/tests/test_ifnotexists.py b/cqlengine/tests/test_ifnotexists.py index bca737be6e..19690a2ef9 100644 --- a/cqlengine/tests/test_ifnotexists.py +++ b/cqlengine/tests/test_ifnotexists.py @@ -1,5 +1,5 @@ from cqlengine.management import sync_table, drop_table, create_keyspace, delete_keyspace -from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.tests.base import BaseCassEngTestCase, CASSANDRA_VERSION from cqlengine.models import Model from cqlengine import columns from uuid import uuid4 @@ -41,31 +41,35 @@ class IfNotExistsInsertTests(BaseIfNotExistsTest): def test_insert_if_not_exists_success(self): """ tests that insertion with if_not_exists work as expected """ - id = uuid4() - TestIfNotExistsModel.create(id=id, count=8, text='123456789') - TestIfNotExistsModel.if_not_exists(True).create(id=id, count=9, text='111111111111') + if CASSANDRA_VERSION >= 20: + id = uuid4() - q = TestIfNotExistsModel.objects(id=id) - self.assertEqual(len(q), 1) + TestIfNotExistsModel.create(id=id, count=8, text='123456789') + TestIfNotExistsModel.if_not_exists(True).create(id=id, count=9, text='111111111111') - tm = q.first() - self.assertEquals(tm.count, 8) - self.assertEquals(tm.text, '123456789') + q = TestIfNotExistsModel.objects(id=id) + self.assertEqual(len(q), 1) + + tm = q.first() + self.assertEquals(tm.count, 8) + self.assertEquals(tm.text, '123456789') def test_insert_if_not_exists_failure(self): """ tests that insertion with if_not_exists failure """ - id = uuid4() - 
TestIfNotExistsModel.create(id=id, count=8, text='123456789') - TestIfNotExistsModel.if_not_exists(False).create(id=id, count=9, text='111111111111') + if CASSANDRA_VERSION >= 20: + id = uuid4() + + TestIfNotExistsModel.create(id=id, count=8, text='123456789') + TestIfNotExistsModel.if_not_exists(False).create(id=id, count=9, text='111111111111') - q = TestIfNotExistsModel.objects(id=id) - self.assertEquals(len(q), 1) + q = TestIfNotExistsModel.objects(id=id) + self.assertEquals(len(q), 1) - tm = q.first() - self.assertEquals(tm.count, 9) - self.assertEquals(tm.text, '111111111111') + tm = q.first() + self.assertEquals(tm.count, 9) + self.assertEquals(tm.text, '111111111111') class IfNotExistsModelTest(BaseIfNotExistsTest): @@ -82,6 +86,7 @@ def test_if_not_exists_included_on_create(self): self.assertIn("IF NOT EXISTS", query) def test_if_not_exists_included_on_save(self): + """ tests if we correctly put 'IF NOT EXISTS' for insert statement """ session = get_session() From e25b8af9a98de4b1450bd259ba06ecedd6007520 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Fri, 15 Aug 2014 11:59:44 +0800 Subject: [PATCH 0942/3726] if not exist is a new feature of cassandra 2.0 --- docs/topics/models.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 982f458c96..359169c016 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -194,6 +194,8 @@ Model Methods object is determined by its primary key(s). And please note using this flag would incur performance cost. + This method supported on Cassandra 2.0 or later. + .. method:: update(**values) Performs an update on the model instance. 
You can pass in values to set on the model From 05c9b17f2f9caffb2836b4a2f965273baab78c17 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Sat, 16 Aug 2014 11:28:16 +0800 Subject: [PATCH 0943/3726] removing parameter of if_not_exists in QuerySet --- cqlengine/query.py | 4 ++-- cqlengine/tests/test_ifnotexists.py | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index c1ff531c10..f6d5a2ee30 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -709,9 +709,9 @@ def timestamp(self, timestamp): clone._timestamp = timestamp return clone - def if_not_exists(self, if_not_exists): + def if_not_exists(self): clone = copy.deepcopy(self) - clone._if_not_exists = if_not_exists + clone._if_not_exists = True return clone def update(self, **values): diff --git a/cqlengine/tests/test_ifnotexists.py b/cqlengine/tests/test_ifnotexists.py index 19690a2ef9..70f33fac4d 100644 --- a/cqlengine/tests/test_ifnotexists.py +++ b/cqlengine/tests/test_ifnotexists.py @@ -46,7 +46,7 @@ def test_insert_if_not_exists_success(self): id = uuid4() TestIfNotExistsModel.create(id=id, count=8, text='123456789') - TestIfNotExistsModel.if_not_exists(True).create(id=id, count=9, text='111111111111') + TestIfNotExistsModel.if_not_exists().create(id=id, count=9, text='111111111111') q = TestIfNotExistsModel.objects(id=id) self.assertEqual(len(q), 1) @@ -62,7 +62,7 @@ def test_insert_if_not_exists_failure(self): id = uuid4() TestIfNotExistsModel.create(id=id, count=8, text='123456789') - TestIfNotExistsModel.if_not_exists(False).create(id=id, count=9, text='111111111111') + TestIfNotExistsModel.create(id=id, count=9, text='111111111111') q = TestIfNotExistsModel.objects(id=id) self.assertEquals(len(q), 1) @@ -80,7 +80,7 @@ def test_if_not_exists_included_on_create(self): session = get_session() with mock.patch.object(session, 'execute') as m: - TestIfNotExistsModel.if_not_exists(True).create(count=8) + 
TestIfNotExistsModel.if_not_exists().create(count=8) query = m.call_args[0][0].query_string self.assertIn("IF NOT EXISTS", query) @@ -99,7 +99,7 @@ def test_if_not_exists_included_on_save(self): def test_queryset_is_returned_on_class(self): """ ensure we get a queryset description back """ - qs = TestIfNotExistsModel.if_not_exists(True) + qs = TestIfNotExistsModel.if_not_exists() self.assertTrue(isinstance(qs, TestIfNotExistsModel.__queryset__), type(qs)) From aa586dee0360cdd0f16da8c79c403bd0f530c586 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Sat, 16 Aug 2014 11:36:14 +0800 Subject: [PATCH 0944/3726] refine code with skipUnless --- cqlengine/tests/test_ifnotexists.py | 43 +++++++++++++++-------------- 1 file changed, 23 insertions(+), 20 deletions(-) diff --git a/cqlengine/tests/test_ifnotexists.py b/cqlengine/tests/test_ifnotexists.py index 70f33fac4d..1483475810 100644 --- a/cqlengine/tests/test_ifnotexists.py +++ b/cqlengine/tests/test_ifnotexists.py @@ -1,10 +1,13 @@ +from unittest import skipUnless from cqlengine.management import sync_table, drop_table, create_keyspace, delete_keyspace -from cqlengine.tests.base import BaseCassEngTestCase, CASSANDRA_VERSION +from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.models import Model from cqlengine import columns from uuid import uuid4 import mock -from cqlengine.connection import get_session +from cqlengine.connection import get_cluster, get_session + +cluster = get_cluster() class TestIfNotExistsModel(Model): @@ -39,37 +42,37 @@ def tearDownClass(cls): class IfNotExistsInsertTests(BaseIfNotExistsTest): + @skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") def test_insert_if_not_exists_success(self): """ tests that insertion with if_not_exists work as expected """ - if CASSANDRA_VERSION >= 20: - id = uuid4() + id = uuid4() - TestIfNotExistsModel.create(id=id, count=8, text='123456789') - TestIfNotExistsModel.if_not_exists().create(id=id, count=9, 
text='111111111111') + TestIfNotExistsModel.create(id=id, count=8, text='123456789') + TestIfNotExistsModel.if_not_exists().create(id=id, count=9, text='111111111111') - q = TestIfNotExistsModel.objects(id=id) - self.assertEqual(len(q), 1) + q = TestIfNotExistsModel.objects(id=id) + self.assertEqual(len(q), 1) - tm = q.first() - self.assertEquals(tm.count, 8) - self.assertEquals(tm.text, '123456789') + tm = q.first() + self.assertEquals(tm.count, 8) + self.assertEquals(tm.text, '123456789') + @skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") def test_insert_if_not_exists_failure(self): """ tests that insertion with if_not_exists failure """ - if CASSANDRA_VERSION >= 20: - id = uuid4() + id = uuid4() - TestIfNotExistsModel.create(id=id, count=8, text='123456789') - TestIfNotExistsModel.create(id=id, count=9, text='111111111111') + TestIfNotExistsModel.create(id=id, count=8, text='123456789') + TestIfNotExistsModel.create(id=id, count=9, text='111111111111') - q = TestIfNotExistsModel.objects(id=id) - self.assertEquals(len(q), 1) + q = TestIfNotExistsModel.objects(id=id) + self.assertEquals(len(q), 1) - tm = q.first() - self.assertEquals(tm.count, 9) - self.assertEquals(tm.text, '111111111111') + tm = q.first() + self.assertEquals(tm.count, 9) + self.assertEquals(tm.text, '111111111111') class IfNotExistsModelTest(BaseIfNotExistsTest): From 2b076264e70e8de2a762f2623aed0bfa4eb0cf79 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Sat, 16 Aug 2014 22:06:53 +0800 Subject: [PATCH 0945/3726] add test case for batch query --- cqlengine/tests/test_ifnotexists.py | 61 +++++++++++++++++++++++------ 1 file changed, 49 insertions(+), 12 deletions(-) diff --git a/cqlengine/tests/test_ifnotexists.py b/cqlengine/tests/test_ifnotexists.py index 1483475810..0dc33f2866 100644 --- a/cqlengine/tests/test_ifnotexists.py +++ b/cqlengine/tests/test_ifnotexists.py @@ -2,10 +2,11 @@ from cqlengine.management import sync_table, drop_table, 
create_keyspace, delete_keyspace from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.models import Model -from cqlengine import columns +from cqlengine import columns, BatchQuery from uuid import uuid4 +from datetime import datetime import mock -from cqlengine.connection import get_cluster, get_session +from cqlengine.connection import get_cluster cluster = get_cluster() @@ -58,7 +59,6 @@ def test_insert_if_not_exists_success(self): self.assertEquals(tm.count, 8) self.assertEquals(tm.text, '123456789') - @skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") def test_insert_if_not_exists_failure(self): """ tests that insertion with if_not_exists failure """ @@ -74,15 +74,48 @@ def test_insert_if_not_exists_failure(self): self.assertEquals(tm.count, 9) self.assertEquals(tm.text, '111111111111') + @skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") + def test_batch_insert_if_not_exists_success(self): + """ tests that batch insertion with if_not_exists work as expected """ + + id = uuid4() + + with BatchQuery() as b: + TestIfNotExistsModel.batch(b).if_not_exists().create(id=id, count=8, text='123456789') + + with BatchQuery() as b: + TestIfNotExistsModel.batch(b).if_not_exists().create(id=id, count=9, text='111111111111') + + q = TestIfNotExistsModel.objects(id=id) + self.assertEqual(len(q), 1) + + tm = q.first() + self.assertEquals(tm.count, 8) + self.assertEquals(tm.text, '123456789') + + def test_batch_insert_if_not_exists_failure(self): + """ tests that batch insertion with if_not_exists failure """ + id = uuid4() + + with BatchQuery() as b: + TestIfNotExistsModel.batch(b).create(id=id, count=8, text='123456789') + with BatchQuery() as b: + TestIfNotExistsModel.batch(b).create(id=id, count=9, text='111111111111') + + q = TestIfNotExistsModel.objects(id=id) + self.assertEquals(len(q), 1) + + tm = q.first() + self.assertEquals(tm.count, 9) + self.assertEquals(tm.text, '111111111111') 
+ class IfNotExistsModelTest(BaseIfNotExistsTest): def test_if_not_exists_included_on_create(self): """ tests that if_not_exists on models works as expected """ - session = get_session() - - with mock.patch.object(session, 'execute') as m: + with mock.patch.object(self.session, 'execute') as m: TestIfNotExistsModel.if_not_exists().create(count=8) query = m.call_args[0][0].query_string @@ -91,9 +124,7 @@ def test_if_not_exists_included_on_create(self): def test_if_not_exists_included_on_save(self): """ tests if we correctly put 'IF NOT EXISTS' for insert statement """ - session = get_session() - - with mock.patch.object(session, 'execute') as m: + with mock.patch.object(self.session, 'execute') as m: tm = TestIfNotExistsModel(count=8) tm.if_not_exists(True).save() @@ -105,6 +136,14 @@ def test_queryset_is_returned_on_class(self): qs = TestIfNotExistsModel.if_not_exists() self.assertTrue(isinstance(qs, TestIfNotExistsModel.__queryset__), type(qs)) + def test_batch_if_not_exists(self): + """ ensure 'IF NOT EXISTS' exists in statement when in batch """ + with mock.patch.object(self.session, 'execute') as m: + with BatchQuery() as b: + TestIfNotExistsModel.batch(b).if_not_exists().create(count=8) + + self.assertIn("IF NOT EXISTS", m.call_args[0][0].query_string) + class IfNotExistsInstanceTest(BaseIfNotExistsTest): @@ -122,13 +161,11 @@ def test_if_not_exists_is_not_include_with_query_on_update(self): """ make sure we don't put 'IF NOT EXIST' in update statements """ - session = get_session() - o = TestIfNotExistsModel.create(text="whatever") o.text = "new stuff" o = o.if_not_exists(True) - with mock.patch.object(session, 'execute') as m: + with mock.patch.object(self.session, 'execute') as m: o.save() query = m.call_args[0][0].query_string From 15e40374f932f9ac58b639ddcd472f0c1306ed22 Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Sat, 16 Aug 2014 23:32:12 +0800 Subject: [PATCH 0946/3726] refine doc --- docs/topics/models.rst | 4 ++-- 1 file changed, 2 
insertions(+), 2 deletions(-) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 359169c016..5a5226d0a3 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -188,13 +188,13 @@ Model Methods Sets the ttl values to run instance updates and inserts queries with. - .. method:: if_not_exists(enable_or_not) + .. method:: if_not_exists() Check the existence of an object before insertion. The existence of an object is determined by its primary key(s). And please note using this flag would incur performance cost. - This method supported on Cassandra 2.0 or later. + This method is supported on Cassandra 2.0 or later. .. method:: update(**values) From e6beb346473d15295ec1abbc64b472f13da79095 Mon Sep 17 00:00:00 2001 From: Tommaso Barbugli Date: Sun, 7 Sep 2014 19:26:44 +0200 Subject: [PATCH 0947/3726] fix static column detection --- cqlengine/management.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 502038ea32..124ae8ace1 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -236,16 +236,15 @@ def get_fields(model): # returns all fields that aren't part of the PK ks_name = model._get_keyspace() col_family = model.column_family_name(include_keyspace=False) - + field_types = ['regular', 'static'] query = "select * from system.schema_columns where keyspace_name = %s and columnfamily_name = %s" tmp = execute(query, [ks_name, col_family]) # Tables containing only primary keys do not appear to create # any entries in system.schema_columns, as only non-primary-key attributes # appear to be inserted into the schema_columns table - try: - return [Field(x['column_name'], x['validator']) for x in tmp if x['type'] == 'regular'] + return [Field(x['column_name'], x['validator']) for x in tmp if x['type'] in field_types] except KeyError: return [Field(x['column_name'], x['validator']) for x in tmp] # convert to Field named tuples From 
74bf5c9f162bf561042c9485ec2f1027b5e69c5d Mon Sep 17 00:00:00 2001 From: "mission.liao" Date: Mon, 8 Sep 2014 18:19:20 +0800 Subject: [PATCH 0948/3726] add LWTException when insertion is not applied. --- cqlengine/exceptions.py | 1 + cqlengine/query.py | 19 +++++++++++++++++-- cqlengine/tests/test_ifnotexists.py | 12 ++++++++---- docs/topics/models.rst | 11 +++++++++++ 4 files changed, 37 insertions(+), 6 deletions(-) diff --git a/cqlengine/exceptions.py b/cqlengine/exceptions.py index 94a51b92ac..f40f8af700 100644 --- a/cqlengine/exceptions.py +++ b/cqlengine/exceptions.py @@ -4,3 +4,4 @@ class ModelException(CQLEngineException): pass class ValidationError(CQLEngineException): pass class UndefinedKeyspaceException(CQLEngineException): pass +class LWTException(CQLEngineException): pass diff --git a/cqlengine/query.py b/cqlengine/query.py index f6d5a2ee30..fd2f0072cf 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -6,7 +6,7 @@ from cqlengine.connection import execute -from cqlengine.exceptions import CQLEngineException, ValidationError +from cqlengine.exceptions import CQLEngineException, ValidationError, LWTException from cqlengine.functions import Token, BaseQueryFunction, QueryValue, UnicodeMixin #CQL 3 reference: @@ -22,6 +22,17 @@ class MultipleObjectsReturned(QueryException): pass import six + +def check_applied(result): + """ + check if result contains some column '[applied]' with false value, + if that value is false, it means our light-weight transaction didn't + applied to database. 
+ """ + if result and '[applied]' in result[0] and result[0]['[applied]'] == False: + raise LWTException('') + + class AbstractQueryableColumn(UnicodeMixin): """ exposes cql query operators through pythons @@ -171,7 +182,8 @@ def execute(self): query_list.append('APPLY BATCH;') - execute('\n'.join(query_list), parameters, self._consistency) + tmp = execute('\n'.join(query_list), parameters, self._consistency) + check_applied(tmp) self.queries = [] self._execute_callbacks() @@ -790,6 +802,9 @@ def _execute(self, q): return self._batch.add_query(q) else: tmp = execute(q, consistency_level=self._consistency) + if self._if_not_exists: + check_applied(tmp) + return tmp def batch(self, batch_obj): diff --git a/cqlengine/tests/test_ifnotexists.py b/cqlengine/tests/test_ifnotexists.py index b1dbe5f900..202a1c03e9 100644 --- a/cqlengine/tests/test_ifnotexists.py +++ b/cqlengine/tests/test_ifnotexists.py @@ -2,9 +2,9 @@ from cqlengine.management import sync_table, drop_table, create_keyspace, delete_keyspace from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.models import Model +from cqlengine.exceptions import LWTException from cqlengine import columns, BatchQuery from uuid import uuid4 -from datetime import datetime import mock from cqlengine.connection import get_cluster @@ -50,7 +50,10 @@ def test_insert_if_not_exists_success(self): id = uuid4() TestIfNotExistsModel.create(id=id, count=8, text='123456789') - TestIfNotExistsModel.if_not_exists().create(id=id, count=9, text='111111111111') + self.assertRaises( + LWTException, + TestIfNotExistsModel.if_not_exists().create, id=id, count=9, text='111111111111' + ) q = TestIfNotExistsModel.objects(id=id) self.assertEqual(len(q), 1) @@ -83,8 +86,9 @@ def test_batch_insert_if_not_exists_success(self): with BatchQuery() as b: TestIfNotExistsModel.batch(b).if_not_exists().create(id=id, count=8, text='123456789') - with BatchQuery() as b: - TestIfNotExistsModel.batch(b).if_not_exists().create(id=id, count=9, 
text='111111111111') + b = BatchQuery() + TestIfNotExistsModel.batch(b).if_not_exists().create(id=id, count=9, text='111111111111') + self.assertRaises(LWTException, b.execute) q = TestIfNotExistsModel.objects(id=id) self.assertEqual(len(q), 1) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 5a5226d0a3..618838ec04 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -194,6 +194,17 @@ Model Methods object is determined by its primary key(s). And please note using this flag would incur performance cost. + if the insertion didn't applied, a LWTException exception would be raised. + + *Example* + + .. code-block:: python + try: + TestIfNotExistsModel.if_not_exists().create(id=id, count=9, text='111111111111') + except LWTException as e: + # handle failure case + print e.existing # existing object + This method is supported on Cassandra 2.0 or later. .. method:: update(**values) From bacce7e269e05c1630d7b597e0e5d23ad7df9b7e Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 10 Sep 2014 09:57:01 -0700 Subject: [PATCH 0949/3726] making sure we're not altering a table when we apply sync_table twice with static colummn --- cqlengine/tests/management/test_management.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index c2fb19afc2..70bb843648 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -270,5 +270,16 @@ class StaticModel(Model): statement = m.call_args[0][0].query_string assert '"name" text static' in statement, statement + # if we sync again, we should not apply an alter w/ a static + sync_table(StaticModel) + + with patch.object(session, "execute", wraps=session.execute) as m2: + sync_table(StaticModel) + + assert len(m2.call_args_list) == 1 + assert "ALTER" not in m2.call_args[0][0].query_string + + + From 91c19ec242ee33eb34e78ddb42c201dfa8b4c5c1 Mon Sep 17 
00:00:00 2001 From: Jon Haddad Date: Wed, 10 Sep 2014 10:12:13 -0700 Subject: [PATCH 0950/3726] trying to id why 1.2 is failing --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 9dfeb0fbbd..eaa431bbf1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -19,7 +19,7 @@ before_install: - sudo apt-get -o Dpkg::Options::="--force-confnew" install -y cassandra - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /usr/local/cassandra/conf/cassandra-env.sh" - sudo service cassandra start - - if [ "$C_VERSION" -le "20" ]; then export CASSANDRA_PROTOCOL_VERSION=1; fi + - sleep 5 install: - "pip install -r requirements.txt --use-mirrors" From 813008d125b17245bd93bb7d7e28f474531fc0fd Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 10 Sep 2014 10:16:57 -0700 Subject: [PATCH 0951/3726] debugging travis --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index eaa431bbf1..c5be417530 100644 --- a/.travis.yml +++ b/.travis.yml @@ -19,7 +19,7 @@ before_install: - sudo apt-get -o Dpkg::Options::="--force-confnew" install -y cassandra - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /usr/local/cassandra/conf/cassandra-env.sh" - sudo service cassandra start - - sleep 5 + - ps aux | grep cassandra install: - "pip install -r requirements.txt --use-mirrors" From 259bc7a068ad299657fa0048ef94c1fa7d8734a6 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 10 Sep 2014 11:50:04 -0700 Subject: [PATCH 0952/3726] more debug --- .travis.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index c5be417530..6cae0a2bb2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -19,7 +19,8 @@ before_install: - sudo apt-get -o Dpkg::Options::="--force-confnew" install -y cassandra - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> 
/usr/local/cassandra/conf/cassandra-env.sh" - sudo service cassandra start - - ps aux | grep cassandra + - cat /etc/cassandra/cassandra-env.sh + - cat /etc/cassandra/cassandra.yaml install: - "pip install -r requirements.txt --use-mirrors" From 3211e8b0be14ac3def96133098e104de07f288bb Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 18 Sep 2014 10:38:40 -0500 Subject: [PATCH 0953/3726] Handle Unauthorized message on schema_triggers query For PYTHON-155, workaround CASSANDRA-7967 --- cassandra/cluster.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index de40876de4..54e1bd3e3b 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -42,7 +42,8 @@ from itertools import groupby from cassandra import (ConsistencyLevel, AuthenticationFailed, - InvalidRequest, OperationTimedOut, UnsupportedOperation) + InvalidRequest, OperationTimedOut, + UnsupportedOperation, Unauthorized) from cassandra.connection import ConnectionException, ConnectionShutdown from cassandra.encoder import Encoder from cassandra.protocol import (QueryMessage, ResultMessage, @@ -1970,6 +1971,10 @@ def _handle_results(success, result): if isinstance(triggers_result, InvalidRequest): log.debug("[control connection] triggers table not found") triggers_result = {} + elif isinstance(triggers_result, Unauthorized): + log.warn("[control connection] this version of Cassandra does not allow access to schema_triggers metadata with authorization enabled (CASSANDRA-7967); " + "The driver will operate normally, but will not reflect triggers in the local metadata model, or schema strings.") + triggers_result = {} else: raise triggers_result From 133185e495e645d40f0d3dd1d1e10cc5f43a1efd Mon Sep 17 00:00:00 2001 From: Victor Godoy Poluceno Date: Mon, 22 Sep 2014 16:53:50 -0300 Subject: [PATCH 0954/3726] Makes execute_concurrent function compatible with Python 2.6 --- cassandra/concurrent.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index 4ddef1ba0f..f79f7f0be2 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -83,7 +83,7 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais first_error = [] if raise_on_first_error else None to_execute = len(statements_and_parameters) results = [None] * to_execute - num_finished = count(start=1) + num_finished = count(1) statements = enumerate(iter(statements_and_parameters)) for i in xrange(min(concurrency, len(statements_and_parameters))): _execute_next(_sentinel, i, event, session, statements, results, None, num_finished, to_execute, first_error) From 185547e4a8a4566c6ca273c61c98e914311ed3e5 Mon Sep 17 00:00:00 2001 From: Irina Kaplounova Date: Sun, 5 Oct 2014 12:00:23 -0700 Subject: [PATCH 0955/3726] PYTHON-155 + authentication test --- .../standard/test_authentication.py | 495 ++++++++++++++++++ 1 file changed, 495 insertions(+) create mode 100644 tests/integration/standard/test_authentication.py diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py new file mode 100644 index 0000000000..df84b0cda0 --- /dev/null +++ b/tests/integration/standard/test_authentication.py @@ -0,0 +1,495 @@ +# Copyright 2013-2014 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import time +import socket + +from ccmlib import common +from ccmlib.cluster import Cluster as CCMCluster + +from tests.integration import setup_package, teardown_package, PROTOCOL_VERSION, CASSANDRA_DIR, CASSANDRA_VERSION, path +from cassandra.cluster import Cluster, NoHostAvailable, Unauthorized, InvalidRequest +from cassandra.auth import PlainTextAuthProvider + + +try: + import unittest2 as unittest +except ImportError: + import unittest + +log = logging.getLogger(__name__) + +AUTH_CLUSTER_NAME = 'auth_test_cluster' + + +def wait_for_cassandra(port=7000, maxwait=120): + """ + Wait until there is connection to specified port. Use cassandra port 7000 by default. + :param port: port to try + :param maxwait: maximum wait time in seconds + :return: True if can connect within 2 minutes, False otherwise + """ + sleeptime = 2 + server_address = ('localhost', port) + + wait_time = 0 + while wait_time < maxwait: + try: + s = socket.create_connection(server_address) + s.close() + log.debug("Cassandra ready after %d seconds" % wait_time) + return True + except socket.error: + wait_time += sleeptime + time.sleep(sleeptime) + + return False + + +def try_connecting(username='', password=''): + """ + Wait until can connect to cluster. + When cluster starts up there is some time while it is not possible to connect to it even though + Cassandra is listening on port 7000. Here we wait until we can actually issue successful Cluster.connect() + method. 
+ :param username: optional user name for connection + :param password: optional password for connection + :return: True if can successfully connect to cluster within 2 minutes, False otherwise + """ + if username and password: + ap = PlainTextAuthProvider(username=username, password=password) + else: + ap = None + maxwait = 120 # in seconds + sleeptime = 1 + + wait_time = 0 + while wait_time < maxwait: + try: + cluster = Cluster(protocol_version=PROTOCOL_VERSION, auth_provider=ap) + cluster.connect() + log.debug("Can connect after %d seconds" % wait_time) + return True + except Exception: + wait_time += sleeptime + time.sleep(sleeptime) + + return False + + +###################################################### +# Module level setup and teardown +# +def setup_module(): + """ + Stop existing cluster started by generic tests init + """ + teardown_package() + + +def teardown_module(): + """ + Start generic tests cluster + """ + setup_package() + wait_for_cassandra() + try_connecting() + +# +# End of Module level setup and teardown +#################################################### + + +class AuthenticationTests(unittest.TestCase): + """ + Tests to cover basic authentication functionality + """ + + ccm_cluster = None + + password = '12345' + suser = 'new_suser' + test_user = 'test_user' + test_other_user = 'other_user' + + ########################## + # Class level setup and teardown + ## + @classmethod + def setup_class(cls): + """ + Class-level fixture. Called once for all tests. + We create a cluster object here and save it in the provided class instance + Create 2 regular users and a new superuser. + Enable authentication by setting 'authenticator': 'PasswordAuthenticator'. + If invoked from authorisation class enable authorization by setting 'authorizer': 'CassandraAuthorizer'. 
+ """ + try: + try: + ccm_cluster = CCMCluster.load(path, AUTH_CLUSTER_NAME) + log.debug("Found existing ccm test authentication cluster, removing") + ccm_cluster.remove() + except Exception: + log.debug("Can not load cluster %s ....." % AUTH_CLUSTER_NAME) + + log.debug("Creating new ccm test authentication cluster") + if CASSANDRA_DIR: + ccm_cluster = CCMCluster(path, AUTH_CLUSTER_NAME, cassandra_dir=CASSANDRA_DIR) + else: + ccm_cluster = CCMCluster(path, AUTH_CLUSTER_NAME, cassandra_version=CASSANDRA_VERSION) + + ccm_cluster.set_configuration_options({'start_native_transport': True}) + ccm_cluster.set_configuration_options({'authenticator': 'PasswordAuthenticator'}) + + # + # This method is called either with AuthenticationTests class or with AuthorizedAuthenticationTests class. + # In the second case we enable CassandraAuthorizer + # + if cls.__name__ == 'AuthorizedAuthenticationTests': + print "Running tests with Cassandra Authorizer Enabled" + log.info("Running tests with Cassandra Authorizer Enabled") + ccm_cluster.set_configuration_options({'authorizer': 'CassandraAuthorizer'}) + else: + print "Running tests with Cassandra Authorizer Disabled" + log.info("Running tests with Cassandra Authorizer Disabled") + + common.switch_cluster(path, AUTH_CLUSTER_NAME) + ccm_cluster.populate(1) + + log.debug("Starting ccm test authentication cluster") + ccm_cluster.start(wait_for_binary_proto=True) + except Exception: + log.exception("Failed to start ccm authentication cluster:") + raise + + if not wait_for_cassandra() or not try_connecting('cassandra', 'cassandra'): + log.exception("Can not talk to cassandra") + raise + + log.debug("Switched to AUTH_CLUSTER_NAME cluster") + cls.ccm_cluster = ccm_cluster + + cls.root_cluster = cls.cluster_as('cassandra', 'cassandra') + cls.root_session = cls.root_cluster.connect() + cls.create_user(cls.root_cluster, cls.test_user, cls.password) + cls.create_user(cls.root_cluster, cls.test_other_user, cls.password) + 
cls.create_user(cls.root_cluster, cls.suser, cls.password, su=True) + + @classmethod + def tearDownClass(cls): + """ + Remove authentication cluster + """ + cls.ccm_cluster.remove() + + ## + # End of class-level setup and teardown + ######################################################## + + def tearDown(self): + """ + Class method fixture setup. Called after each test + Reverse settings for users to test default values + """ + self.session = self.root_cluster.connect() + self.session.execute("ALTER USER %s WITH PASSWORD '%s' SUPERUSER " % (self.suser, self.password)) + self.session.execute("ALTER USER %s WITH PASSWORD '%s' NOSUPERUSER " % (self.test_other_user, self.password)) + self.session.execute("ALTER USER %s WITH PASSWORD '%s' NOSUPERUSER " % (self.test_user, self.password)) + + @classmethod + def enable_authorizer(cls): + cls.authorizer = True + + @staticmethod + def create_user(cluster, usr, pwd, su=False): + """ + Create user with given username and password and optional status + """ + status = '' + if su: + status = 'SUPERUSER' + session = cluster.connect() + session.execute("CREATE USER IF NOT EXISTS %s WITH PASSWORD '%s' %s " % (usr, pwd, status)) + session.shutdown() + + @staticmethod + def list_users(cluster): + """ + List existing cluster users + :param cluster: cluster object + :return: list of user names + """ + return AuthenticationTests.get_user_data(cluster).keys() + + @staticmethod + def get_user_data(cluster): + """ + Get information about users as a dictionary with user name and superuser status + :param cluster: cluster to use + :return: dictionary with user name as key and su status as value + """ + session = cluster.connect() + result = session.execute("LIST USERS") + users = dict() + for row in result: + users[row.name] = row.super + session.shutdown() + return users + + @staticmethod + def cluster_as(usr, pwd): + """ + Create CLuster object authenticated with specified user and password + :rtype : Cluster + :param usr: username + 
:param pwd: user password + :return: CLuster object + """ + return Cluster(protocol_version=PROTOCOL_VERSION, + auth_provider=PlainTextAuthProvider(username=usr, + password=pwd)) + + def test_new_user_created(self): + """ + new_user_created: superuser can create users + """ + self.assertIn(self.test_user, self.list_users(self.root_cluster), 'user %s is missing' % self.test_user) + self.assertIn(self.suser, self.list_users(self.root_cluster), 'user %s is missing' % self.suser) + + def test_user_status(self): + """ + user_status: regular user and superuser have right status in LIST USERS output + """ + users = self.get_user_data(self.root_cluster) + self.assertIn(self.test_user, users.keys()) + self.assertEqual(users[self.test_user], False) + + def test_user_can_connect(self): + """ + user_can_connect: regular user can connect to C* and LIST USERS + """ + cluster = self.cluster_as(self.test_user, self.password) + # Verify that regular user can LIST USERS + self.assertIn(self.test_user, self.list_users(cluster), 'user %s is missing' % self.test_user) + + def test_new_superuser_can_connect(self): + """ + new_superuser_can_connect: new superuser can connect to C* and LIST USERS + """ + # First verify that suser is created + self.assertIn(self.suser, self.list_users(self.root_cluster), 'user %s is missing' % self.suser) + # Now verify that we can connect as suser + cluster = self.cluster_as(self.suser, self.password) + self.assertIn(self.suser, self.list_users(cluster), 'user %s is missing' % self.suser) + + def test_user_cannot_create_user(self): + """ + user_cannot_create_user: regular user can not CREATE USER + """ + cluster = self.cluster_as(self.test_user, self.password) + self.assertRaisesRegexp(Unauthorized, + '.*Only superusers are allowed to perform CREATE USER queries.*', + lambda: self.create_user(cluster, 'test_user_1', '12345')) + + def test_user_cannot_change_su_pwd(self): + """ + user_cannot_change_su_pwd: regular user cannot change password for superuser 
+ """ + cluster = self.cluster_as(self.test_user, self.password) + session = cluster.connect() + stmt = "ALTER USER cassandra WITH PASSWORD 'new_su_pwd' " + self.assertRaisesRegexp(Unauthorized, + ".*You aren't allowed to alter this user.*", + lambda: session.execute(stmt)) + + def test_user_cannot_change_other_user_pwd(self): + """ + user_cannot_change_other_user_pwd: regular user cannot change password for other user + """ + cluster = self.cluster_as(self.test_user, self.password) + session = cluster.connect() + stmt = "ALTER USER other_user WITH PASSWORD 'new_reg_pwd' " + self.assertRaisesRegexp(Unauthorized, + ".*You aren't allowed to alter this user.*", + lambda: session.execute(stmt)) + + def test_regular_user_can_change_his_pwd(self): + """ + regular_user_can_change_his_pwd: regular user can change his password and connect to C* with new pwd + and cannot connect with old pwd + """ + cluster = self.cluster_as(self.test_user, self.password) + session = cluster.connect() + + new_password = 'AxaXax' + session.execute("ALTER USER %s WITH PASSWORD '%s' " % (self.test_user, new_password)) + session.shutdown() + + # and can connect to the cluster with new password + cluster = self.cluster_as(self.test_user, new_password) + session = cluster.connect() + session.shutdown() + # Should be able to execute some commands + self.assertIn(self.test_user, self.list_users(cluster), 'user %s is missing' % self.test_user) + + # Cannot connect with old pwd + cluster = self.cluster_as(self.test_user, self.password) + self.assertRaisesRegexp(NoHostAvailable, + '.*Bad credentials.*Username and/or ' + 'password are incorrect.*', + cluster.connect) + + def test_su_can_alter_reg_user_status(self): + """ + su_can_alter_reg_user_status: superuser can change regular user status + """ + session = self.root_cluster.connect() + + # Turn test_user into superuser + session.execute("ALTER USER %s WITH PASSWORD '%s' SUPERUSER " % (self.test_user, self.password)) + users = 
self.get_user_data(self.root_cluster) + self.assertEqual(users[self.test_user], True, msg="%s has SUPERUSER set to %s" + % (self.test_user, users[self.test_user])) + + def test_su_can_alter_other_su_status(self): + """ + su_can_alter_other_su_status: superuser can change other superuser status + """ + session = self.root_cluster.connect() + + # Turn suser into regular user + session.execute("ALTER USER %s WITH PASSWORD '%s' NOSUPERUSER " % (self.suser, self.password)) + users = self.get_user_data(self.root_cluster) + self.assertEqual(users[self.suser], False, msg="%s has SUPERUSER set to %s" % (self.suser, users[self.suser])) + + def test_su_cannot_alter_his_status(self): + """ + su_cannot_alter_his_status: superuser can't change his status + """ + session = self.root_cluster.connect() + stmt = "ALTER USER cassandra WITH PASSWORD 'cassandra' NOSUPERUSER " + self.assertRaisesRegexp(Unauthorized, + ".*You aren't allowed to alter your own superuser status.*", + lambda: session.execute(stmt)) + + def test_su_can_change_her_pwd(self): + """ + su_can_change_her_pwd: superuser can change her password and connect to C* with new pwd + and cannot connect with old pwd + """ + cluster = self.cluster_as(self.suser, self.password) + session = cluster.connect() + + new_password = 'AxaXax' + session.execute("ALTER USER %s WITH PASSWORD '%s' " % (self.suser, new_password)) + session.shutdown() + + # and can connect to the cluster with new password + cluster = self.cluster_as(self.suser, new_password) + session = cluster.connect() + # Should be able to execute some commands + self.assertIn(self.test_user, self.list_users(cluster), 'user %s is missing' % self.suser) + session.shutdown() + # Cannot connect with old pwd + cluster = self.cluster_as(self.suser, self.password) + self.assertRaisesRegexp(NoHostAvailable, + '.*Bad credentials.*Username and/or ' + 'password are incorrect.*', + cluster.connect) + + def test_su_can_drop_other_superuser(self): + """ + 
su_can_drop_other_superuser: superuser can drop other superuser + """ + session = self.root_cluster.connect() + self.create_user(self.root_cluster, 'temp_su_user', self.password, su=True) + session.execute("DROP USER temp_su_user") + self.assertNotIn('temp_du_user', self.list_users(self.root_cluster), 'user temp_su_user NOT dropped') + + def test_su_can_drop_regular_user(self): + """ + su_can_drop_regular_user: superuser can drop regular user + """ + session = self.root_cluster.connect() + self.create_user(self.root_cluster, 'temp_reg_user', self.password) + session.execute("DROP USER temp_reg_user") + self.assertNotIn('temp_reg_user', self.list_users(self.root_cluster), 'user temp_reg_user NOT dropped') + + def test_su_cannot_drop_herself(self): + """ + su_cannot_drop_herself: superuser can't drop herself + """ + self.create_user(self.root_cluster, 'temp_su_user', self.password, su=True) + cluster = self.cluster_as('temp_su_user', self.password) + self.session = cluster.connect() + stmt = "DROP USER temp_su_user" + self.assertRaisesRegexp(InvalidRequest, + ".*Users aren't allowed to DROP themselves.*", + lambda: self.session.execute(stmt)) + + def test_su_connect_wrong_pwd(self): + """ + su_connect_wrong_pwd: can't connect with bad password + Verifies that trying to connect with wrong password triggers + "AuthenticationFailed /Bad credentials / Username and/or + password are incorrect" + """ + cluster = self.cluster_as('cassandra', 'who_is_cassandra') + self.assertRaisesRegexp(NoHostAvailable, + '.*AuthenticationFailed.*Bad credentials.*Username and/or ' + 'password are incorrect.*', + cluster.connect) + + def test_su_connect_wrong_username(self): + """ + su_connect_wrong_username: can't connect with bad username + Try to connect with wrong superuser name fails with: "AuthenticationFailed /Bad credentials / Username and/or + password are incorrect" + """ + cluster = self.cluster_as('Cassandra', 'cassandra') + self.assertRaisesRegexp(NoHostAvailable, + 
'.*AuthenticationFailed.*Bad credentials.*Username and/or ' + 'password are incorrect.*', + cluster.connect) + + def test_su_connect_empty_pwd(self): + """ + su_connect_empty_pwd: can't connect with an empty password + Empty superuser password triggers "AuthenticationFailed /Bad credentials / + Username and/or password are incorrect" + """ + cluster = self.cluster_as('Cassandra', '') + self.assertRaisesRegexp(NoHostAvailable, + '.*AuthenticationFailed.*Bad credentials.*Username and/or ' + 'password are incorrect.*', + cluster.connect) + + def test_su_connect_no_user_specified(self): + """ + su_connect_no_user_specified: can't connect with an empty user + No user specified triggers "AuthenticationFailed /Remote end requires authentication + """ + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.assertRaisesRegexp(NoHostAvailable, + '.*AuthenticationFailed.*Remote end requires authentication.*', + cluster.connect) + + +class AuthorizedAuthenticationTests(AuthenticationTests): + """ + Same test as AuthenticationTests but enables authorization + """ + pass + From 99e582bcab045b45516a9dd4816bb5caa8ce4bd7 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Sun, 5 Oct 2014 16:14:05 -0700 Subject: [PATCH 0956/3726] cassandra 2.1 travis testing --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 6cae0a2bb2..55f6ed58be 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,6 +3,7 @@ language: python env: - CASSANDRA_VERSION=12 - CASSANDRA_VERSION=20 + - CASSANDRA_VERSION=21 python: - "2.7" From a332c0ca8997dd570262d7ef9da33c9c303a8224 Mon Sep 17 00:00:00 2001 From: Irina Kaplounova Date: Mon, 6 Oct 2014 22:21:24 -0700 Subject: [PATCH 0957/3726] Test should skip if run with PROTOCOL_VERSION=1 --- tests/integration/standard/test_authentication.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 
df84b0cda0..ee9e32a526 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -94,8 +94,14 @@ def try_connecting(username='', password=''): # def setup_module(): """ + Test is skipped if run with cql version < 2 Stop existing cluster started by generic tests init """ + + if PROTOCOL_VERSION < 2: + raise unittest.SkipTest( + "SASL Authentication is not supported in version 1 of the protocol") + teardown_package() @@ -134,7 +140,7 @@ def setup_class(cls): We create a cluster object here and save it in the provided class instance Create 2 regular users and a new superuser. Enable authentication by setting 'authenticator': 'PasswordAuthenticator'. - If invoked from authorisation class enable authorization by setting 'authorizer': 'CassandraAuthorizer'. + If invoked from authorization class enable authorization by setting 'authorizer': 'CassandraAuthorizer'. """ try: try: From dfac16e4dfca76dbed5badac90c256ddd5fb5e31 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 7 Oct 2014 12:37:30 -0700 Subject: [PATCH 0958/3726] maybe fixed connection issue --- .travis.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 55f6ed58be..2e801c7415 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,9 +16,11 @@ before_install: - sudo gpg --export --armor F758CE318D77295D | sudo apt-key add - - sudo gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00 - sudo gpg --export --armor 2B5C1B00 | sudo apt-key add - + - sudo gpg --keyserver pgp.mit.edu --recv-keys 0353B12C + - sudo gpg --export --armor 0353B12C | sudo apt-key add - - sudo apt-get update - sudo apt-get -o Dpkg::Options::="--force-confnew" install -y cassandra - - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /usr/local/cassandra/conf/cassandra-env.sh" + - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /etc/cassandra/cassandra-env.sh" - sudo 
service cassandra start - cat /etc/cassandra/cassandra-env.sh - cat /etc/cassandra/cassandra.yaml From 1902f225c78867e66755b3fa52b4cf5308e56027 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 7 Oct 2014 12:58:44 -0700 Subject: [PATCH 0959/3726] get running list of procs matching cassandra --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 2e801c7415..f1b53d9a01 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,6 +22,7 @@ before_install: - sudo apt-get -o Dpkg::Options::="--force-confnew" install -y cassandra - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /etc/cassandra/cassandra-env.sh" - sudo service cassandra start + - ps aux | grep cassandra - cat /etc/cassandra/cassandra-env.sh - cat /etc/cassandra/cassandra.yaml From 16c919bcb060597c60a33700dc7104dfc8873636 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 7 Oct 2014 13:02:31 -0700 Subject: [PATCH 0960/3726] get the output --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index f1b53d9a01..b55a4ed1e9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -23,6 +23,7 @@ before_install: - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /etc/cassandra/cassandra-env.sh" - sudo service cassandra start - ps aux | grep cassandra + - sudo cat /var/log/cassandra/output.log - cat /etc/cassandra/cassandra-env.sh - cat /etc/cassandra/cassandra.yaml From e2917ff1ad3946808100d6cfd142ca6ee64ea1fa Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 7 Oct 2014 13:08:27 -0700 Subject: [PATCH 0961/3726] netstat to debug --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index b55a4ed1e9..128714ccc4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,6 +26,7 @@ before_install: - sudo cat /var/log/cassandra/output.log - cat /etc/cassandra/cassandra-env.sh - cat /etc/cassandra/cassandra.yaml + - sudo netstat install: - "pip install -r 
requirements.txt --use-mirrors" From 78749272dbe96ec79dc77734d6cc18903b67f48a Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 7 Oct 2014 13:28:15 -0700 Subject: [PATCH 0962/3726] more debug --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 128714ccc4..f1c4f69b82 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,6 +22,7 @@ before_install: - sudo apt-get -o Dpkg::Options::="--force-confnew" install -y cassandra - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /etc/cassandra/cassandra-env.sh" - sudo service cassandra start + - sleep 5 - ps aux | grep cassandra - sudo cat /var/log/cassandra/output.log - cat /etc/cassandra/cassandra-env.sh From 4404134352448d0f82d53d7842871d53e6c41e88 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 7 Oct 2014 13:32:45 -0700 Subject: [PATCH 0963/3726] remove old data files which are causing 1.2 build to fail --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index f1c4f69b82..bf546efd99 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,9 +20,11 @@ before_install: - sudo gpg --export --armor 0353B12C | sudo apt-key add - - sudo apt-get update - sudo apt-get -o Dpkg::Options::="--force-confnew" install -y cassandra + - sudo rm -rf /var/lib/cassandra/* - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /etc/cassandra/cassandra-env.sh" - sudo service cassandra start - sleep 5 + - ps aux | grep cassandra - sudo cat /var/log/cassandra/output.log - cat /etc/cassandra/cassandra-env.sh From ce8d6cfb706176376528426ca3049a480cf8d566 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 7 Oct 2014 13:39:07 -0700 Subject: [PATCH 0964/3726] removed debugging code --- .travis.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index bf546efd99..96044d1e57 100644 --- a/.travis.yml +++ b/.travis.yml @@ -23,13 +23,6 @@ before_install: - sudo rm -rf 
/var/lib/cassandra/* - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /etc/cassandra/cassandra-env.sh" - sudo service cassandra start - - sleep 5 - - - ps aux | grep cassandra - - sudo cat /var/log/cassandra/output.log - - cat /etc/cassandra/cassandra-env.sh - - cat /etc/cassandra/cassandra.yaml - - sudo netstat install: - "pip install -r requirements.txt --use-mirrors" From e6b6e5c7d598fdcc24b3484ed4f2489e45281dbd Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 7 Oct 2014 14:03:11 -0700 Subject: [PATCH 0965/3726] updated dev requirements, skipping static column test in < 2.0 --- cqlengine/tests/management/test_management.py | 13 ++++++------- requirements-dev.txt | 1 + requirements.txt | 2 -- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 70bb843648..757e799002 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -8,8 +8,9 @@ from cqlengine.tests.query.test_queryset import TestModel from cqlengine.models import Model from cqlengine import columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy - - +from unittest import skipUnless +from cqlengine.connection import get_cluster +cluster = get_cluster() class CreateKeyspaceTest(BaseCassEngTestCase): def test_create_succeeeds(self): @@ -247,6 +248,7 @@ def test_failure(self): sync_table(self.FakeModel) +@skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") def test_static_columns(): class StaticModel(Model): id = columns.Integer(primary_key=True) @@ -260,11 +262,8 @@ class StaticModel(Model): from cqlengine.connection import get_session session = get_session() - with patch.object(session, "execute", side_effect=Exception) as m: - try: - sync_table(StaticModel) - except: - pass + with patch.object(session, "execute", wraps=session.execute) as m: + 
sync_table(StaticModel) assert m.call_count > 0 statement = m.call_args[0][0].query_string diff --git a/requirements-dev.txt b/requirements-dev.txt index 2773a99fe4..748d8bbe54 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,3 +1,4 @@ +-r requirements.txt nose nose-progressive profilestats diff --git a/requirements.txt b/requirements.txt index b9bac81778..dd822f6853 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,3 @@ -ipython -ipdb Sphinx mock sure==1.2.5 From 95ad0f0f3b18493975f681e72f31653c4f7dbb68 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 7 Oct 2014 16:57:27 -0500 Subject: [PATCH 0966/3726] Python SortedSet for platforms without blist Fixes PYTHON-104 Fixes PYTHON-167 Addresses CASSANDRA-8030 For nested UDTs and collections, the set we return must accommodate non-hashable objects (built-in set does not). --- cassandra/cqltypes.py | 16 +- cassandra/encoder.py | 12 +- cassandra/util.py | 206 ++++++++++++++++++- tests/integration/standard/test_types.py | 19 +- tests/unit/test_marshalling.py | 11 +- tests/unit/test_sortedset.py | 245 +++++++++++++++++++++++ 6 files changed, 466 insertions(+), 43 deletions(-) create mode 100644 tests/unit/test_sortedset.py diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 05d867446e..b177e1417e 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -47,26 +47,17 @@ int32_pack, int32_unpack, int64_pack, int64_unpack, float_pack, float_unpack, double_pack, double_unpack, varint_pack, varint_unpack) -from cassandra.util import OrderedDict +from cassandra.util import OrderedDict, SortedSet apache_cassandra_type_prefix = 'org.apache.cassandra.db.marshal.' 
+ if six.PY3: _number_types = frozenset((int, float)) long = int else: _number_types = frozenset((int, long, float)) -try: - from blist import sortedset -except ImportError: - warnings.warn( - "The blist library is not available, so a normal set will " - "be used in place of blist.sortedset for set collection values. " - "You can find the blist library here: https://pypi.python.org/pypi/blist/") - - sortedset = set - def trim_if_startswith(s, prefix): if s.startswith(prefix): @@ -77,6 +68,7 @@ def trim_if_startswith(s, prefix): def unix_time_from_uuid1(u): return (u.time - 0x01B21DD213814000) / 10000000.0 + _casstypes = {} @@ -720,7 +712,7 @@ class ListType(_SimpleParameterizedType): class SetType(_SimpleParameterizedType): typename = 'set' num_subtypes = 1 - adapter = sortedset + adapter = SortedSet class MapType(_ParameterizedType): diff --git a/cassandra/encoder.py b/cassandra/encoder.py index d4a0a2b48e..1abb811c4a 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -28,7 +28,7 @@ from uuid import UUID import six -from cassandra.util import OrderedDict +from cassandra.util import OrderedDict, SortedSet if six.PY3: long = int @@ -79,6 +79,7 @@ def __init__(self): list: self.cql_encode_list_collection, tuple: self.cql_encode_list_collection, set: self.cql_encode_set_collection, + SortedSet: self.cql_encode_set_collection, frozenset: self.cql_encode_set_collection, types.GeneratorType: self.cql_encode_list_collection, ValueSequence: self.cql_encode_sequence @@ -98,15 +99,6 @@ def __init__(self): type(None): self.cql_encode_none, }) - # sortedset is optional - try: - from blist import sortedset - self.mapping.update({ - sortedset: self.cql_encode_set_collection - }) - except ImportError: - pass - def cql_encode_none(self, val): """ Converts :const:`None` to the string 'NULL'. 
diff --git a/cassandra/util.py b/cassandra/util.py index 640690a409..15e2fecf0e 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -126,7 +126,7 @@ def __eq__(self, other): if isinstance(other, OrderedDict): if len(self) != len(other): return False - for p, q in zip(self.items(), other.items()): + for p, q in zip(self.items(), other.items()): if p != q: return False return True @@ -349,3 +349,207 @@ def union(self, other): def isdisjoint(self, other): return len(self.intersection(other)) == 0 + +try: + from blist import sortedset as SortedSet +except ImportError: + + import warnings + + warnings.warn( + "The blist library is not available, so a pure python list-based set will " + "be used in place of blist.sortedset for set collection values. " + "You can find the blist library here: https://pypi.python.org/pypi/blist/") + + from bisect import bisect_left + + class SortedSet(object): + ''' + A sorted set based on sorted list + + This set is used in place of blist.sortedset in Python environments + where blist module/extension is not available. + + A sorted set implementation is used in this case because it does not + require its elements to be immutable/hashable. 
+ + #Not implemented: update functions, inplace operators + + ''' + + def __init__(self, iterable=()): + self._items = [] + for i in iterable: + self.add(i) + + def __len__(self): + return len(self._items) + + def __iter__(self): + return iter(self._items) + + def __reversed__(self): + return reversed(self._items) + + def __repr__(self): + return '%s(%r)' % ( + self.__class__.__name__, + self._items) + + def __reduce__(self): + return self.__class__, (self._items,) + + def __eq__(self, other): + if len(other) != len(self._items): + return False + if isinstance(other, self.__class__): + return self._items == other._items + else: + return all(item in other for item in self._items) + + def __ne__(self, other): + if len(other) != len(self._items): + return True + if isinstance(other, self.__class__): + return self._items != other._items + else: + return any(item not in other for item in self._items) + + def __le__(self, other): + return self.issubset(other) + + def __lt__(self, other): + return len(other) > len(self._items) and self.issubset(other) + + def __ge__(self, other): + return self.issuperset(other) + + def __gt__(self, other): + return len(self._items) > len(other) and self.issuperset(other) + + def __and__(self, other): + return self._intersect(other) + + def __or__(self, other): + return self.union(other) + + def __sub__(self, other): + return self._diff(other) + + def __xor__(self, other): + return self.symmetric_difference(other) + + def __contains__(self, item): + i = bisect_left(self._items, item) + return i < len(self._items) and self._items[i] == item + + def add(self, item): + i = bisect_left(self._items, item) + if i < len(self._items): + if self._items[i] != item: + self._items.insert(i, item) + else: + self._items.append(item) + + def clear(self): + del self._items[:] + + def copy(self): + new = SortedSet() + new._items = self._items + return new + + def isdisjoint(self, other): + return len(self._intersect(other)) == 0 + + def issubset(self, 
other): + return len(self._intersect(other)) == len(self._items) + + def issuperset(self, other): + return len(self._intersect(other)) == len(other) + + def pop(self): + if not self._items: + raise KeyError("pop from empty set") + return self._items.pop() + + def remove(self, item): + i = bisect_left(self._items, item) + if i < len(self._items): + if self._items[i] == item: + self._items.pop(i) + return + raise KeyError('%r' % item) + + def union(self, *others): + union = SortedSet() + union._items = list(self._items) + for other in others: + if isinstance(other, self.__class__): + i = 0 + for item in other._items: + i = bisect_left(union._items, item, i) + if i < len(union._items): + if item != union._items[i]: + union._items.insert(i, item) + else: + union._items.append(item) + else: + for item in other: + union.add(item) + return union + + def intersection(self, *others): + isect = self.copy() + for other in others: + isect = isect._intersect(other) + if not isect: + break + return isect + + def difference(self, *others): + diff = self.copy() + for other in others: + diff = diff._diff(other) + if not diff: + break + return diff + + def symmetric_difference(self, other): + diff_self_other = self._diff(other) + diff_other_self = other.difference(self) + return diff_self_other.union(diff_other_self) + + def _diff(self, other): + diff = SortedSet() + if isinstance(other, self.__class__): + i = 0 + for item in self._items: + i = bisect_left(other._items, item, i) + if i < len(other._items): + if item != other._items[i]: + diff._items.append(item) + else: + diff._items.append(item) + else: + for item in self._items: + if item not in other: + diff.add(item) + return diff + + def _intersect(self, other): + isect = SortedSet() + if isinstance(other, self.__class__): + i = 0 + for item in self._items: + i = bisect_left(other._items, item, i) + if i < len(other._items): + if item == other._items[i]: + isect._items.append(item) + else: + break + else: + for item in 
self._items: + if item in other: + isect.add(item) + return isect diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 67c6fc60c3..de45f13192 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -16,7 +16,7 @@ try: import unittest2 as unittest except ImportError: - import unittest # noqa + import unittest # noqa import logging log = logging.getLogger(__name__) @@ -26,16 +26,11 @@ import six from uuid import uuid1, uuid4 -try: - from blist import sortedset -except ImportError: - sortedset = set # noqa - from cassandra import InvalidRequest from cassandra.cluster import Cluster from cassandra.cqltypes import Int32Type, EMPTY from cassandra.query import dict_factory -from cassandra.util import OrderedDict +from cassandra.util import OrderedDict, SortedSet from tests.integration import get_server_versions, PROTOCOL_VERSION @@ -204,7 +199,7 @@ def test_basic_types(self): "1.2.3.4", # inet 12345, # int ['a', 'b', 'c'], # list collection - sortedset((1, 2, 3)), # set collection + SortedSet((1, 2, 3)), # set collection {'a': 1, 'b': 2}, # map collection "text", # text mydatetime, # timestamp @@ -591,7 +586,7 @@ def test_tuple_non_primitive_subtypes(self): # test tuple> for datatype in DATA_TYPE_PRIMITIVES: - created_tuple = tuple([sortedset([get_sample(datatype)])]) + created_tuple = tuple([SortedSet([get_sample(datatype)])]) s.execute("INSERT INTO mytable (k, v_%s) VALUES (0, %s)", (i, created_tuple)) result = s.execute("SELECT v_%s FROM mytable WHERE k=0", (i,))[0] @@ -660,9 +655,9 @@ def test_nested_tuples(self): "v_3 frozen<%s>," "v_128 frozen<%s>" ")" % (self.nested_tuples_schema_helper(1), - self.nested_tuples_schema_helper(2), - self.nested_tuples_schema_helper(3), - self.nested_tuples_schema_helper(128))) + self.nested_tuples_schema_helper(2), + self.nested_tuples_schema_helper(3), + self.nested_tuples_schema_helper(128))) for i in (1, 2, 3, 128): # create tuple 
diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index 16299fc289..1acbb236e8 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -23,13 +23,8 @@ from decimal import Decimal from uuid import UUID -try: - from blist import sortedset -except ImportError: - sortedset = set - from cassandra.cqltypes import lookup_casstype -from cassandra.util import OrderedDict +from cassandra.util import OrderedDict, SortedSet marshalled_value_pairs = ( # binary form, type, python native type @@ -82,7 +77,7 @@ (b'', 'SetType(LongType)', None), (b'\x00\x00', 'MapType(DecimalType, BooleanType)', OrderedDict()), (b'\x00\x00', 'ListType(FloatType)', []), - (b'\x00\x00', 'SetType(IntegerType)', sortedset()), + (b'\x00\x00', 'SetType(IntegerType)', SortedSet()), (b'\x00\x01\x00\x10\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0', 'ListType(TimeUUIDType)', [UUID(bytes=b'\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0')]), ) @@ -95,7 +90,7 @@ # vagaries of internal python ordering for unordered types marshalled_value_pairs_unsafe = ( (b'\x00\x03\x00\x06\xe3\x81\xbfbob\x00\x04\x00\x00\x00\xc7\x00\x00\x00\x04\xff\xff\xff\xff\x00\x01\\\x00\x04\x00\x00\x00\x00', 'MapType(UTF8Type, Int32Type)', ordered_dict_value), - (b'\x00\x02\x00\x08@\x01\x99\x99\x99\x99\x99\x9a\x00\x08@\x14\x00\x00\x00\x00\x00\x00', 'SetType(DoubleType)', sortedset([2.2, 5.0])), + (b'\x00\x02\x00\x08@\x01\x99\x99\x99\x99\x99\x9a\x00\x08@\x14\x00\x00\x00\x00\x00\x00', 'SetType(DoubleType)', SortedSet([2.2, 5.0])), (b'\x00', 'IntegerType', 0), ) diff --git a/tests/unit/test_sortedset.py b/tests/unit/test_sortedset.py new file mode 100644 index 0000000000..d54c5f4d1a --- /dev/null +++ b/tests/unit/test_sortedset.py @@ -0,0 +1,245 @@ +# Copyright 2013-2014 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from cassandra.util import SortedSet + + +class TestSortedSet(unittest.TestCase): + def test_init(self): + input = [5, 4, 3, 2, 1, 1, 1] + expected = sorted(set(input)) + ss = SortedSet(input) + self.assertEqual(len(ss), len(expected)) + self.assertEqual(list(ss), expected) + + def test_contains(self): + input = [5, 4, 3, 2, 1, 1, 1] + expected = sorted(set(input)) + ss = SortedSet(input) + + for i in expected: + self.assertTrue(i in ss) + self.assertFalse(i not in ss) + + hi = max(expected)+1 + lo = min(expected)-1 + + self.assertFalse(hi in ss) + self.assertFalse(lo in ss) + + def test_mutable_contents(self): + ba = bytearray(b'some data here') + ss = SortedSet([ba, ba]) + self.assertEqual(list(ss), [ba]) + + def test_clear(self): + ss = SortedSet([1, 2, 3]) + ss.clear() + self.assertEqual(len(ss), 0) + + def test_equal(self): + s1 = set([1]) + s12 = set([1, 2]) + ss1 = SortedSet(s1) + ss12 = SortedSet(s12) + + self.assertEqual(ss1, s1) + self.assertEqual(ss12, s12) + self.assertNotEqual(ss1, ss12) + self.assertNotEqual(ss12, ss1) + self.assertNotEqual(ss1, s12) + self.assertNotEqual(ss12, s1) + + def test_copy(self): + o = object() + ss = SortedSet([object(), o]) + ss2 = ss.copy() + self.assertNotEqual(id(ss), id(ss2)) + self.assertTrue(o in ss) + self.assertTrue(o in ss2) + + def test_isdisjoint(self): + # set, ss + s12 = set([1, 2]) + s2 = set([2]) + ss1 = SortedSet([1]) + ss13 = SortedSet([1, 3]) + ss3 = SortedSet([3]) + # s ss disjoint + 
self.assertTrue(s2.isdisjoint(ss1)) + self.assertTrue(s2.isdisjoint(ss13)) + # s ss not disjoint + self.assertFalse(s12.isdisjoint(ss1)) + self.assertFalse(s12.isdisjoint(ss13)) + # ss s disjoint + self.assertTrue(ss1.isdisjoint(s2)) + self.assertTrue(ss13.isdisjoint(s2)) + # ss s not disjoint + self.assertFalse(ss1.isdisjoint(s12)) + self.assertFalse(ss13.isdisjoint(s12)) + # ss ss disjoint + self.assertTrue(ss1.isdisjoint(ss3)) + self.assertTrue(ss3.isdisjoint(ss1)) + # ss ss not disjoint + self.assertFalse(ss1.isdisjoint(ss13)) + self.assertFalse(ss13.isdisjoint(ss1)) + self.assertFalse(ss3.isdisjoint(ss13)) + self.assertFalse(ss13.isdisjoint(ss3)) + + def test_issubset(self): + s12 = set([1, 2]) + ss1 = SortedSet([1]) + ss13 = SortedSet([1, 3]) + ss3 = SortedSet([3]) + + self.assertTrue(ss1.issubset(s12)) + self.assertTrue(ss1.issubset(ss13)) + + self.assertFalse(ss1.issubset(ss3)) + self.assertFalse(ss13.issubset(ss3)) + self.assertFalse(ss13.issubset(ss1)) + self.assertFalse(ss13.issubset(s12)) + + def test_issuperset(self): + s12 = set([1, 2]) + ss1 = SortedSet([1]) + ss13 = SortedSet([1, 3]) + ss3 = SortedSet([3]) + + self.assertTrue(s12.issuperset(ss1)) + self.assertTrue(ss13.issuperset(ss3)) + self.assertTrue(ss13.issuperset(ss13)) + + self.assertFalse(s12.issuperset(ss13)) + self.assertFalse(ss1.issuperset(ss3)) + self.assertFalse(ss1.issuperset(ss13)) + + def test_union(self): + s1 = set([1]) + ss12 = SortedSet([1, 2]) + ss23 = SortedSet([2, 3]) + + self.assertEqual(SortedSet().union(s1), SortedSet([1])) + self.assertEqual(ss12.union(s1), SortedSet([1, 2])) + self.assertEqual(ss12.union(ss23), SortedSet([1, 2, 3])) + self.assertEqual(ss23.union(ss12), SortedSet([1, 2, 3])) + self.assertEqual(ss23.union(s1), SortedSet([1, 2, 3])) + + def test_intersection(self): + s12 = set([1, 2]) + ss23 = SortedSet([2, 3]) + self.assertEqual(s12.intersection(ss23), set([2])) + self.assertEqual(ss23.intersection(s12), SortedSet([2])) + 
self.assertEqual(ss23.intersection(s12, [2], (2,)), SortedSet([2])) + self.assertEqual(ss23.intersection(s12, [900], (2,)), SortedSet()) + + def test_difference(self): + s1 = set([1]) + ss12 = SortedSet([1, 2]) + ss23 = SortedSet([2, 3]) + + self.assertEqual(SortedSet().difference(s1), SortedSet()) + self.assertEqual(ss12.difference(s1), SortedSet([2])) + self.assertEqual(ss12.difference(ss23), SortedSet([1])) + self.assertEqual(ss23.difference(ss12), SortedSet([3])) + self.assertEqual(ss23.difference(s1), SortedSet([2, 3])) + + def test_symmetric_difference(self): + s = set([1, 3, 5]) + ss = SortedSet([2, 3, 4]) + ss2 = SortedSet([5, 6, 7]) + + self.assertEqual(ss.symmetric_difference(s), SortedSet([1, 2, 4, 5])) + self.assertFalse(ss.symmetric_difference(ss)) + self.assertEqual(ss.symmetric_difference(s), SortedSet([1, 2, 4, 5])) + self.assertEqual(ss2.symmetric_difference(ss), SortedSet([2, 3, 4, 5, 6, 7])) + + def test_pop(self): + ss = SortedSet([2, 1]) + self.assertEqual(ss.pop(), 2) + self.assertEqual(ss.pop(), 1) + try: + ss.pop() + self.fail("Error not thrown") + except (KeyError, IndexError) as e: + pass + + def test_remove(self): + ss = SortedSet([2, 1]) + self.assertEqual(len(ss), 2) + self.assertRaises(KeyError, ss.remove, 3) + self.assertEqual(len(ss), 2) + ss.remove(1) + self.assertEqual(len(ss), 1) + ss.remove(2) + self.assertFalse(ss) + self.assertRaises(KeyError, ss.remove, 2) + self.assertFalse(ss) + + def test_operators(self): + + ss1 = SortedSet([1]) + ss12 = SortedSet([1, 2]) + # __ne__ + self.assertFalse(ss12 != ss12) + self.assertFalse(ss12 != SortedSet([1, 2])) + self.assertTrue(ss12 != SortedSet()) + + # __le__ + self.assertTrue(ss1 <= ss12) + self.assertTrue(ss12 <= ss12) + self.assertFalse(ss12 <= ss1) + + # __lt__ + self.assertTrue(ss1 < ss12) + self.assertFalse(ss12 < ss12) + self.assertFalse(ss12 < ss1) + + # __ge__ + self.assertFalse(ss1 >= ss12) + self.assertTrue(ss12 >= ss12) + self.assertTrue(ss12 >= ss1) + + # __gt__ + 
self.assertFalse(ss1 > ss12) + self.assertFalse(ss12 > ss12) + self.assertTrue(ss12 > ss1) + + # __and__ + self.assertEqual(ss1 & ss12, ss1) + self.assertEqual(ss12 & ss12, ss12) + self.assertEqual(ss12 & set(), SortedSet()) + + # __or__ + self.assertEqual(ss1 | ss12, ss12) + self.assertEqual(ss12 | ss12, ss12) + self.assertEqual(ss12 | set(), ss12) + self.assertEqual(SortedSet() | ss1 | ss12, ss12) + + # __sub__ + self.assertEqual(ss1 - ss12, set()) + self.assertEqual(ss12 - ss12, set()) + self.assertEqual(ss12 - set(), ss12) + self.assertEqual(ss12 - ss1, SortedSet([2])) + + # __xor__ + self.assertEqual(ss1 ^ ss12, set([2])) + self.assertEqual(ss12 ^ ss1, set([2])) + self.assertEqual(ss12 ^ ss12, set()) + self.assertEqual(ss12 ^ set(), ss12) From 1d561a80ea96ebc71cb136291591b7936bd9eca8 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 7 Oct 2014 18:18:46 -0700 Subject: [PATCH 0967/3726] some settings were removed in C 2.1 --- cqlengine/tests/management/test_management.py | 20 +++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 757e799002..d42cd69a56 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -149,19 +149,22 @@ def test_set_table_properties(self): 'caching': CACHING_ALL, 'comment': 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR', 'gc_grace_seconds': 2063, - 'populate_io_cache_on_flush': True, 'read_repair_chance': 0.17985, - 'replicate_on_write': False # For some reason 'dclocal_read_repair_chance' in CQL is called # just 'local_read_repair_chance' in the schema table. 
# Source: https://issues.apache.org/jira/browse/CASSANDRA-6717 # TODO: due to a bug in the native driver i'm not seeing the local read repair chance show up # 'local_read_repair_chance': 0.50811, } + if CASSANDRA_VERSION <= 20: + expected['replicate_on_write'] = False + + if CASSANDRA_VERSION == 20: + expected['populate_io_cache_on_flush'] = True + expected['index_interval'] = 98706 if CASSANDRA_VERSION >= 20: expected['default_time_to_live'] = 4756 - expected['index_interval'] = 98706 expected['memtable_flush_period_in_ms'] = 43681 self.assertDictContainsSubset(expected, management.get_table_settings(ModelWithTableProperties).options) @@ -190,16 +193,21 @@ def test_table_property_update(self): 'caching': CACHING_NONE, 'comment': 'xirAkRWZVVvsmzRvXamiEcQkshkUIDINVJZgLYSdnGHweiBrAiJdLJkVohdRy', 'gc_grace_seconds': 96362, - 'populate_io_cache_on_flush': False, 'read_repair_chance': 0.2989, - 'replicate_on_write': True # TODO see above comment re: native driver missing local read repair chance - # 'local_read_repair_chance': 0.12732, + #'local_read_repair_chance': 0.12732, } if CASSANDRA_VERSION >= 20: expected['memtable_flush_period_in_ms'] = 60210 expected['default_time_to_live'] = 65178 + + if CASSANDRA_VERSION == 20: expected['index_interval'] = 94207 + # these featuers removed in cassandra 2.1 + if CASSANDRA_VERSION <= 20: + expected['replicate_on_write'] = True + expected['populate_io_cache_on_flush'] = False + self.assertDictContainsSubset(expected, table_settings) From a2759ddd974482b3053e7c183cd9d437ef2b8f6b Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 7 Oct 2014 20:31:03 -0700 Subject: [PATCH 0968/3726] only check caching on c < 2.1 --- cqlengine/tests/management/test_management.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index d42cd69a56..3bda88ed9d 100644 --- a/cqlengine/tests/management/test_management.py +++ 
b/cqlengine/tests/management/test_management.py @@ -146,7 +146,6 @@ def test_set_table_properties(self): sync_table(ModelWithTableProperties) expected = {'bloom_filter_fp_chance': 0.76328, - 'caching': CACHING_ALL, 'comment': 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR', 'gc_grace_seconds': 2063, 'read_repair_chance': 0.17985, @@ -157,6 +156,7 @@ def test_set_table_properties(self): # 'local_read_repair_chance': 0.50811, } if CASSANDRA_VERSION <= 20: + expected['caching'] = CACHING_ALL expected['replicate_on_write'] = False if CASSANDRA_VERSION == 20: @@ -190,7 +190,6 @@ def test_table_property_update(self): table_settings = management.get_table_settings(ModelWithTableProperties).options expected = {'bloom_filter_fp_chance': 0.66778, - 'caching': CACHING_NONE, 'comment': 'xirAkRWZVVvsmzRvXamiEcQkshkUIDINVJZgLYSdnGHweiBrAiJdLJkVohdRy', 'gc_grace_seconds': 96362, 'read_repair_chance': 0.2989, @@ -205,6 +204,7 @@ def test_table_property_update(self): # these featuers removed in cassandra 2.1 if CASSANDRA_VERSION <= 20: + expected['caching'] = CACHING_NONE expected['replicate_on_write'] = True expected['populate_io_cache_on_flush'] = False From 691ec35d89ae889a109a1756f6cdc42730eb9a9c Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 7 Oct 2014 20:50:23 -0700 Subject: [PATCH 0969/3726] updated commit log --- changelog | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/changelog b/changelog index e0fdec574f..88a4894fdf 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,11 @@ CHANGELOG +0.19.0 + +Fixed tests with Cassandra version 1.2 and 2.1 +Fixed broken static columns +support for IF NOT EXISTS via Model.if_not_exists().create() + 0.18.1 [264] fixed support for bytearrays in blob columns From accafd2eedd6991a33e493c4a8edfaf4ed8287d6 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 7 Oct 2014 20:51:34 -0700 Subject: [PATCH 0970/3726] updated version --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION 
b/cqlengine/VERSION index 249afd517d..1cf0537c34 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.18.1 +0.19.0 From 7abba76e74b14fd1a461737321523d5cf6db5e9b Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Wed, 8 Oct 2014 12:24:47 -0400 Subject: [PATCH 0971/3726] =?UTF-8?q?Initial=20attempt=E2=80=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- cqlengine/models.py | 28 ++++++++++- cqlengine/query.py | 49 ++++++++++++++++++- cqlengine/statements.py | 44 ++++++++++++++++- .../model/test_transaction_statements.py | 36 ++++++++++++++ 4 files changed, 154 insertions(+), 3 deletions(-) create mode 100644 cqlengine/tests/model/test_transaction_statements.py diff --git a/cqlengine/models.py b/cqlengine/models.py index d725e42c9f..92ce9bace2 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -74,6 +74,27 @@ def __call__(self, *args, **kwargs): raise NotImplementedError +class TransactionDescriptor(object): + """ + returns a query set descriptor + """ + def __get__(self, instance, model): + if instance: + def transaction_setter(transaction): + instance._transaction = transaction + return instance + return transaction_setter + qs = model.__queryset__(model) + + def transaction_setter(transaction): + qs._transaction = transaction + return instance + return transaction_setter + + def __call__(self, *args, **kwargs): + raise NotImplementedError + + class TTLDescriptor(object): """ returns a query set descriptor @@ -222,6 +243,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass objects = QuerySetDescriptor() ttl = TTLDescriptor() consistency = ConsistencyDescriptor() + transaction = TransactionDescriptor() # custom timestamps, see USING TIMESTAMP X timestamp = TimestampDescriptor() @@ -282,7 +304,7 @@ def __init__(self, **values): self._timestamp = None for name, column in self._columns.items(): - value = values.get(name, None) + value = values.get(name, None) if value is not None or 
isinstance(column, columns.BaseContainerColumn): value = column.to_python(value) value_mngr = column.value_manager(self, column, value) @@ -510,6 +532,10 @@ def filter(cls, *args, **kwargs): return cls.objects.filter(*args, **kwargs) + @classmethod + def transaction(cls, *args, **kwargs): + return cls.objects.transaction(*args, **kwargs) + @classmethod def get(cls, *args, **kwargs): return cls.objects.get(*args, **kwargs) diff --git a/cqlengine/query.py b/cqlengine/query.py index de929c76e0..0f042dbf53 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -13,7 +13,7 @@ #http://www.datastax.com/docs/1.1/references/cql/index from cqlengine.operators import InOperator, EqualsOperator, GreaterThanOperator, GreaterThanOrEqualOperator from cqlengine.operators import LessThanOperator, LessThanOrEqualOperator, BaseWhereOperator -from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause, InsertStatement, BaseCQLStatement, MapUpdateClause, MapDeleteClause, ListUpdateClause, SetUpdateClause, CounterUpdateClause +from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause, InsertStatement, BaseCQLStatement, MapUpdateClause, MapDeleteClause, ListUpdateClause, SetUpdateClause, CounterUpdateClause, TransactionClause class QueryException(CQLEngineException): pass @@ -194,6 +194,9 @@ def __init__(self, model): #Where clause filters self._where = [] + # Transaction clause filters + self._transaction = [] + #ordering arguments self._order = [] @@ -394,6 +397,50 @@ def _parse_filter_arg(self, arg): else: raise QueryException("Can't parse '{}'".format(arg)) + def transaction(self, *args, **kwargs): + """Adds IF statements to queryset""" + if len([x for x in kwargs.values() if x is None]): + raise CQLEngineException("None values on transaction are not allowed") + + clone = copy.deepcopy(self) + for operator in args: + if not isinstance(operator, TransactionClause): + 
raise QueryException('{} is not a valid query operator'.format(operator)) + clone._transaction.append(operator) + + for col_name, val in kwargs.items(): + try: + column = self.model._get_column(col_name) + except KeyError: + if col_name in ['exists', 'not_exists']: + pass + elif col_name == 'pk__token': + if not isinstance(val, Token): + raise QueryException("Virtual column 'pk__token' may only be compared to Token() values") + column = columns._PartitionKeysToken(self.model) + quote_field = False + else: + raise QueryException("Can't resolve column name: '{}'".format(col_name)) + + if isinstance(val, Token): + if col_name != 'pk__token': + raise QueryException("Token() values may only be compared to the 'pk__token' virtual column") + partition_columns = column.partition_columns + if len(partition_columns) != len(val.value): + raise QueryException( + 'Token() received {} arguments but model has {} partition keys'.format( + len(val.value), len(partition_columns))) + val.set_columns(partition_columns) + + if isinstance(val, BaseQueryFunction): + query_val = val + else: + query_val = column.to_database(val) + + clone._transaction.append(TransactionClause(col_name, query_val)) + + return clone + def filter(self, *args, **kwargs): """ Adds WHERE arguments to the queryset, returning a new queryset diff --git a/cqlengine/statements.py b/cqlengine/statements.py index f1681a1658..834db39179 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -139,6 +139,20 @@ def insert_tuple(self): return self.field, self.context_id +class TransactionClause(BaseClause): + """ A single variable transaction statement """ + + def __unicode__(self): + if self.field == 'exists': + return u'EXISTS' + if self.field == 'not_exists': + return u'NOT EXISTS' + return u'"{}" = %({})s'.format(self.field, self.context_id) + + def insert_tuple(self): + return self.field, self.context_id + + class ContainerUpdateClause(AssignmentClause): def __init__(self, field, value, operation=None, 
previous=None, column=None): @@ -573,7 +587,8 @@ def __init__(self, consistency=None, where=None, ttl=None, - timestamp=None): + timestamp=None, + transactions=None): super(AssignmentStatement, self).__init__( table, consistency=consistency, @@ -587,6 +602,11 @@ def __init__(self, for assignment in assignments or []: self.add_assignment_clause(assignment) + # Add transaction statements + self.transactions = [] + for transaction in transactions or []: + self.add_transaction_clause(transaction) + def update_context_id(self, i): super(AssignmentStatement, self).update_context_id(i) for assignment in self.assignments: @@ -605,6 +625,19 @@ def add_assignment_clause(self, clause): self.context_counter += clause.get_context_size() self.assignments.append(clause) + def add_transaction_clause(self, clause): + """ + Adds a transaction clause to this statement + + :param clause: The clause that will be added to the transaction statement + :type clause: TransactionClause + """ + if not isinstance(clause, TransactionClause): + raise StatementException('only instances of AssignmentClause can be added to statements') + clause.set_context_id(self.context_counter) + self.context_counter += clause.get_context_size() + self.transactions.append(clause) + @property def is_empty(self): return len(self.assignments) == 0 @@ -615,6 +648,9 @@ def get_context(self): clause.update_context(ctx) return ctx + def _transactions(self): + return 'IF {}'.format(' AND '.join([six.text_type(c) for c in self.where_clauses])) + class InsertStatement(AssignmentStatement): """ an cql insert select statement """ @@ -639,6 +675,9 @@ def __unicode__(self): if self.timestamp: qs += ["USING TIMESTAMP {}".format(self.timestamp_normalized)] + if len(self.transactions) > 0: + qs += [self._transactions] + return ' '.join(qs) @@ -665,6 +704,9 @@ def __unicode__(self): if self.where_clauses: qs += [self._where] + if len(self.transactions) > 0: + qs += [self._transactions] + return ' '.join(qs) diff --git 
a/cqlengine/tests/model/test_transaction_statements.py b/cqlengine/tests/model/test_transaction_statements.py new file mode 100644 index 0000000000..8afabb1ed2 --- /dev/null +++ b/cqlengine/tests/model/test_transaction_statements.py @@ -0,0 +1,36 @@ +__author__ = 'Tim Martin' +from uuid import uuid4 + +from mock import patch +from cqlengine.exceptions import ValidationError + +from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.models import Model +from cqlengine import columns +from cqlengine.management import sync_table, drop_table + + +class TestUpdateModel(Model): + __keyspace__ = 'test' + partition = columns.UUID(primary_key=True, default=uuid4) + cluster = columns.UUID(primary_key=True, default=uuid4) + count = columns.Integer(required=False) + text = columns.Text(required=False, index=True) + + +class ModelUpdateTests(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(ModelUpdateTests, cls).setUpClass() + sync_table(TestUpdateModel) + + @classmethod + def tearDownClass(cls): + super(ModelUpdateTests, cls).tearDownClass() + drop_table(TestUpdateModel) + + def test_transaction_insertion(self): + m = TestUpdateModel(count=5, text='something').transaction(exists=True) + m.save() + x = 10 \ No newline at end of file From 5ad9759b4a3c5bcc36a09dfa25dca9dee4ab8919 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 8 Oct 2014 11:36:13 -0500 Subject: [PATCH 0972/3726] Change to 'sortedset' to match cqlsh type mapping. 
also made eq, ne comparisons more robust for other types --- cassandra/cqltypes.py | 4 +- cassandra/encoder.py | 4 +- cassandra/util.py | 26 +++--- tests/integration/standard/test_types.py | 6 +- tests/unit/test_marshalling.py | 6 +- tests/unit/test_sortedset.py | 106 ++++++++++++----------- 6 files changed, 80 insertions(+), 72 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index b177e1417e..9fae266818 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -47,7 +47,7 @@ int32_pack, int32_unpack, int64_pack, int64_unpack, float_pack, float_unpack, double_pack, double_unpack, varint_pack, varint_unpack) -from cassandra.util import OrderedDict, SortedSet +from cassandra.util import OrderedDict, sortedset apache_cassandra_type_prefix = 'org.apache.cassandra.db.marshal.' @@ -712,7 +712,7 @@ class ListType(_SimpleParameterizedType): class SetType(_SimpleParameterizedType): typename = 'set' num_subtypes = 1 - adapter = SortedSet + adapter = sortedset class MapType(_ParameterizedType): diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 1abb811c4a..8b1bb2fe73 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -28,7 +28,7 @@ from uuid import UUID import six -from cassandra.util import OrderedDict, SortedSet +from cassandra.util import OrderedDict, sortedset if six.PY3: long = int @@ -79,7 +79,7 @@ def __init__(self): list: self.cql_encode_list_collection, tuple: self.cql_encode_list_collection, set: self.cql_encode_set_collection, - SortedSet: self.cql_encode_set_collection, + sortedset: self.cql_encode_set_collection, frozenset: self.cql_encode_set_collection, types.GeneratorType: self.cql_encode_list_collection, ValueSequence: self.cql_encode_sequence diff --git a/cassandra/util.py b/cassandra/util.py index 15e2fecf0e..5c078c6f8d 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -351,7 +351,7 @@ def isdisjoint(self, other): return len(self.intersection(other)) == 0 try: - from blist import sortedset as 
SortedSet + from blist import sortedset except ImportError: import warnings @@ -363,7 +363,7 @@ def isdisjoint(self, other): from bisect import bisect_left - class SortedSet(object): + class sortedset(object): ''' A sorted set based on sorted list @@ -400,20 +400,22 @@ def __reduce__(self): return self.__class__, (self._items,) def __eq__(self, other): - if len(other) != len(self._items): - return False if isinstance(other, self.__class__): return self._items == other._items else: - return all(item in other for item in self._items) + if not isinstance(other, set): + return False + + return len(other) == len(self._items) and all(item in other for item in self._items) def __ne__(self, other): - if len(other) != len(self._items): - return True if isinstance(other, self.__class__): return self._items != other._items else: - return any(item not in other for item in self._items) + if not isinstance(other, set): + return True + + return len(other) != len(self._items) or any(item not in other for item in self._items) def __le__(self, other): return self.issubset(other) @@ -455,7 +457,7 @@ def clear(self): del self._items[:] def copy(self): - new = SortedSet() + new = sortedset() new._items = self._items return new @@ -482,7 +484,7 @@ def remove(self, item): raise KeyError('%r' % item) def union(self, *others): - union = SortedSet() + union = sortedset() union._items = list(self._items) for other in others: if isinstance(other, self.__class__): @@ -521,7 +523,7 @@ def symmetric_difference(self, other): return diff_self_other.union(diff_other_self) def _diff(self, other): - diff = SortedSet() + diff = sortedset() if isinstance(other, self.__class__): i = 0 for item in self._items: @@ -538,7 +540,7 @@ def _diff(self, other): return diff def _intersect(self, other): - isect = SortedSet() + isect = sortedset() if isinstance(other, self.__class__): i = 0 for item in self._items: diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py 
index de45f13192..553085f6e8 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -30,7 +30,7 @@ from cassandra.cluster import Cluster from cassandra.cqltypes import Int32Type, EMPTY from cassandra.query import dict_factory -from cassandra.util import OrderedDict, SortedSet +from cassandra.util import OrderedDict, sortedset from tests.integration import get_server_versions, PROTOCOL_VERSION @@ -199,7 +199,7 @@ def test_basic_types(self): "1.2.3.4", # inet 12345, # int ['a', 'b', 'c'], # list collection - SortedSet((1, 2, 3)), # set collection + sortedset((1, 2, 3)), # set collection {'a': 1, 'b': 2}, # map collection "text", # text mydatetime, # timestamp @@ -586,7 +586,7 @@ def test_tuple_non_primitive_subtypes(self): # test tuple> for datatype in DATA_TYPE_PRIMITIVES: - created_tuple = tuple([SortedSet([get_sample(datatype)])]) + created_tuple = tuple([sortedset([get_sample(datatype)])]) s.execute("INSERT INTO mytable (k, v_%s) VALUES (0, %s)", (i, created_tuple)) result = s.execute("SELECT v_%s FROM mytable WHERE k=0", (i,))[0] diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index 1acbb236e8..18dbf2f8a5 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -24,7 +24,7 @@ from uuid import UUID from cassandra.cqltypes import lookup_casstype -from cassandra.util import OrderedDict, SortedSet +from cassandra.util import OrderedDict, sortedset marshalled_value_pairs = ( # binary form, type, python native type @@ -77,7 +77,7 @@ (b'', 'SetType(LongType)', None), (b'\x00\x00', 'MapType(DecimalType, BooleanType)', OrderedDict()), (b'\x00\x00', 'ListType(FloatType)', []), - (b'\x00\x00', 'SetType(IntegerType)', SortedSet()), + (b'\x00\x00', 'SetType(IntegerType)', sortedset()), (b'\x00\x01\x00\x10\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0', 'ListType(TimeUUIDType)', [UUID(bytes=b'\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0')]), ) @@ -90,7 +90,7 @@ # vagaries of 
internal python ordering for unordered types marshalled_value_pairs_unsafe = ( (b'\x00\x03\x00\x06\xe3\x81\xbfbob\x00\x04\x00\x00\x00\xc7\x00\x00\x00\x04\xff\xff\xff\xff\x00\x01\\\x00\x04\x00\x00\x00\x00', 'MapType(UTF8Type, Int32Type)', ordered_dict_value), - (b'\x00\x02\x00\x08@\x01\x99\x99\x99\x99\x99\x9a\x00\x08@\x14\x00\x00\x00\x00\x00\x00', 'SetType(DoubleType)', SortedSet([2.2, 5.0])), + (b'\x00\x02\x00\x08@\x01\x99\x99\x99\x99\x99\x9a\x00\x08@\x14\x00\x00\x00\x00\x00\x00', 'SetType(DoubleType)', sortedset([2.2, 5.0])), (b'\x00', 'IntegerType', 0), ) diff --git a/tests/unit/test_sortedset.py b/tests/unit/test_sortedset.py index d54c5f4d1a..159f5a257e 100644 --- a/tests/unit/test_sortedset.py +++ b/tests/unit/test_sortedset.py @@ -17,21 +17,22 @@ except ImportError: import unittest # noqa -from cassandra.util import SortedSet +from cassandra.util import sortedset +from cassandra.cqltypes import EMPTY class TestSortedSet(unittest.TestCase): def test_init(self): input = [5, 4, 3, 2, 1, 1, 1] expected = sorted(set(input)) - ss = SortedSet(input) + ss = sortedset(input) self.assertEqual(len(ss), len(expected)) self.assertEqual(list(ss), expected) def test_contains(self): input = [5, 4, 3, 2, 1, 1, 1] expected = sorted(set(input)) - ss = SortedSet(input) + ss = sortedset(input) for i in expected: self.assertTrue(i in ss) @@ -45,19 +46,19 @@ def test_contains(self): def test_mutable_contents(self): ba = bytearray(b'some data here') - ss = SortedSet([ba, ba]) + ss = sortedset([ba, ba]) self.assertEqual(list(ss), [ba]) def test_clear(self): - ss = SortedSet([1, 2, 3]) + ss = sortedset([1, 2, 3]) ss.clear() self.assertEqual(len(ss), 0) def test_equal(self): s1 = set([1]) s12 = set([1, 2]) - ss1 = SortedSet(s1) - ss12 = SortedSet(s12) + ss1 = sortedset(s1) + ss12 = sortedset(s12) self.assertEqual(ss1, s1) self.assertEqual(ss12, s12) @@ -65,10 +66,15 @@ def test_equal(self): self.assertNotEqual(ss12, ss1) self.assertNotEqual(ss1, s12) self.assertNotEqual(ss12, s1) + 
self.assertNotEqual(ss1, EMPTY) def test_copy(self): - o = object() - ss = SortedSet([object(), o]) + class comparable(object): + def __lt__(self, other): + return id(self) < id(other) + + o = comparable() + ss = sortedset([comparable(), o]) ss2 = ss.copy() self.assertNotEqual(id(ss), id(ss2)) self.assertTrue(o in ss) @@ -78,9 +84,9 @@ def test_isdisjoint(self): # set, ss s12 = set([1, 2]) s2 = set([2]) - ss1 = SortedSet([1]) - ss13 = SortedSet([1, 3]) - ss3 = SortedSet([3]) + ss1 = sortedset([1]) + ss13 = sortedset([1, 3]) + ss3 = sortedset([3]) # s ss disjoint self.assertTrue(s2.isdisjoint(ss1)) self.assertTrue(s2.isdisjoint(ss13)) @@ -104,9 +110,9 @@ def test_isdisjoint(self): def test_issubset(self): s12 = set([1, 2]) - ss1 = SortedSet([1]) - ss13 = SortedSet([1, 3]) - ss3 = SortedSet([3]) + ss1 = sortedset([1]) + ss13 = sortedset([1, 3]) + ss3 = sortedset([3]) self.assertTrue(ss1.issubset(s12)) self.assertTrue(ss1.issubset(ss13)) @@ -118,9 +124,9 @@ def test_issubset(self): def test_issuperset(self): s12 = set([1, 2]) - ss1 = SortedSet([1]) - ss13 = SortedSet([1, 3]) - ss3 = SortedSet([3]) + ss1 = sortedset([1]) + ss13 = sortedset([1, 3]) + ss3 = sortedset([3]) self.assertTrue(s12.issuperset(ss1)) self.assertTrue(ss13.issuperset(ss3)) @@ -132,46 +138,46 @@ def test_issuperset(self): def test_union(self): s1 = set([1]) - ss12 = SortedSet([1, 2]) - ss23 = SortedSet([2, 3]) + ss12 = sortedset([1, 2]) + ss23 = sortedset([2, 3]) - self.assertEqual(SortedSet().union(s1), SortedSet([1])) - self.assertEqual(ss12.union(s1), SortedSet([1, 2])) - self.assertEqual(ss12.union(ss23), SortedSet([1, 2, 3])) - self.assertEqual(ss23.union(ss12), SortedSet([1, 2, 3])) - self.assertEqual(ss23.union(s1), SortedSet([1, 2, 3])) + self.assertEqual(sortedset().union(s1), sortedset([1])) + self.assertEqual(ss12.union(s1), sortedset([1, 2])) + self.assertEqual(ss12.union(ss23), sortedset([1, 2, 3])) + self.assertEqual(ss23.union(ss12), sortedset([1, 2, 3])) + 
self.assertEqual(ss23.union(s1), sortedset([1, 2, 3])) def test_intersection(self): s12 = set([1, 2]) - ss23 = SortedSet([2, 3]) + ss23 = sortedset([2, 3]) self.assertEqual(s12.intersection(ss23), set([2])) - self.assertEqual(ss23.intersection(s12), SortedSet([2])) - self.assertEqual(ss23.intersection(s12, [2], (2,)), SortedSet([2])) - self.assertEqual(ss23.intersection(s12, [900], (2,)), SortedSet()) + self.assertEqual(ss23.intersection(s12), sortedset([2])) + self.assertEqual(ss23.intersection(s12, [2], (2,)), sortedset([2])) + self.assertEqual(ss23.intersection(s12, [900], (2,)), sortedset()) def test_difference(self): s1 = set([1]) - ss12 = SortedSet([1, 2]) - ss23 = SortedSet([2, 3]) + ss12 = sortedset([1, 2]) + ss23 = sortedset([2, 3]) - self.assertEqual(SortedSet().difference(s1), SortedSet()) - self.assertEqual(ss12.difference(s1), SortedSet([2])) - self.assertEqual(ss12.difference(ss23), SortedSet([1])) - self.assertEqual(ss23.difference(ss12), SortedSet([3])) - self.assertEqual(ss23.difference(s1), SortedSet([2, 3])) + self.assertEqual(sortedset().difference(s1), sortedset()) + self.assertEqual(ss12.difference(s1), sortedset([2])) + self.assertEqual(ss12.difference(ss23), sortedset([1])) + self.assertEqual(ss23.difference(ss12), sortedset([3])) + self.assertEqual(ss23.difference(s1), sortedset([2, 3])) def test_symmetric_difference(self): s = set([1, 3, 5]) - ss = SortedSet([2, 3, 4]) - ss2 = SortedSet([5, 6, 7]) + ss = sortedset([2, 3, 4]) + ss2 = sortedset([5, 6, 7]) - self.assertEqual(ss.symmetric_difference(s), SortedSet([1, 2, 4, 5])) + self.assertEqual(ss.symmetric_difference(s), sortedset([1, 2, 4, 5])) self.assertFalse(ss.symmetric_difference(ss)) - self.assertEqual(ss.symmetric_difference(s), SortedSet([1, 2, 4, 5])) - self.assertEqual(ss2.symmetric_difference(ss), SortedSet([2, 3, 4, 5, 6, 7])) + self.assertEqual(ss.symmetric_difference(s), sortedset([1, 2, 4, 5])) + self.assertEqual(ss2.symmetric_difference(ss), sortedset([2, 3, 4, 5, 6, 7])) 
def test_pop(self): - ss = SortedSet([2, 1]) + ss = sortedset([2, 1]) self.assertEqual(ss.pop(), 2) self.assertEqual(ss.pop(), 1) try: @@ -181,7 +187,7 @@ def test_pop(self): pass def test_remove(self): - ss = SortedSet([2, 1]) + ss = sortedset([2, 1]) self.assertEqual(len(ss), 2) self.assertRaises(KeyError, ss.remove, 3) self.assertEqual(len(ss), 2) @@ -194,12 +200,12 @@ def test_remove(self): def test_operators(self): - ss1 = SortedSet([1]) - ss12 = SortedSet([1, 2]) + ss1 = sortedset([1]) + ss12 = sortedset([1, 2]) # __ne__ self.assertFalse(ss12 != ss12) - self.assertFalse(ss12 != SortedSet([1, 2])) - self.assertTrue(ss12 != SortedSet()) + self.assertFalse(ss12 != sortedset([1, 2])) + self.assertTrue(ss12 != sortedset()) # __le__ self.assertTrue(ss1 <= ss12) @@ -224,19 +230,19 @@ def test_operators(self): # __and__ self.assertEqual(ss1 & ss12, ss1) self.assertEqual(ss12 & ss12, ss12) - self.assertEqual(ss12 & set(), SortedSet()) + self.assertEqual(ss12 & set(), sortedset()) # __or__ self.assertEqual(ss1 | ss12, ss12) self.assertEqual(ss12 | ss12, ss12) self.assertEqual(ss12 | set(), ss12) - self.assertEqual(SortedSet() | ss1 | ss12, ss12) + self.assertEqual(sortedset() | ss1 | ss12, ss12) # __sub__ self.assertEqual(ss1 - ss12, set()) self.assertEqual(ss12 - ss12, set()) self.assertEqual(ss12 - set(), ss12) - self.assertEqual(ss12 - ss1, SortedSet([2])) + self.assertEqual(ss12 - ss1, sortedset([2])) # __xor__ self.assertEqual(ss1 ^ ss12, set([2])) From 44a518100947702eca15d258a6258d2ca17b7c62 Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Wed, 8 Oct 2014 14:48:35 -0400 Subject: [PATCH 0973/3726] Bug fixes --- cqlengine/models.py | 20 +++++++++------- cqlengine/query.py | 23 +++++++++++++------ cqlengine/statements.py | 15 ++++++++---- .../model/test_transaction_statements.py | 17 ++++++++++---- 4 files changed, 52 insertions(+), 23 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 92ce9bace2..3598bcad3e 100644 --- a/cqlengine/models.py 
+++ b/cqlengine/models.py @@ -80,9 +80,14 @@ class TransactionDescriptor(object): """ def __get__(self, instance, model): if instance: - def transaction_setter(transaction): - instance._transaction = transaction + def transaction_setter(*prepared_transaction, **unprepared_transactions): + if len(prepared_transaction) > 0: + transactions = prepared_transaction[0] + else: + transactions = instance.objects.transaction(**unprepared_transactions)._transaction + instance._transaction = transactions return instance + return transaction_setter qs = model.__queryset__(model) @@ -302,6 +307,7 @@ def __init__(self, **values): self._values = {} self._ttl = self.__default_ttl__ self._timestamp = None + self._transaction = None for name, column in self._columns.items(): value = values.get(name, None) @@ -532,10 +538,6 @@ def filter(cls, *args, **kwargs): return cls.objects.filter(*args, **kwargs) - @classmethod - def transaction(cls, *args, **kwargs): - return cls.objects.transaction(*args, **kwargs) - @classmethod def get(cls, *args, **kwargs): return cls.objects.get(*args, **kwargs) @@ -554,7 +556,8 @@ def save(self): batch=self._batch, ttl=self._ttl, timestamp=self._timestamp, - consistency=self.__consistency__).save() + consistency=self.__consistency__, + transaction=self._transaction).save() #reset the value managers for v in self._values.values(): @@ -592,7 +595,8 @@ def update(self, **values): batch=self._batch, ttl=self._ttl, timestamp=self._timestamp, - consistency=self.__consistency__).update() + consistency=self.__consistency__, + transaction=self._transaction).update() #reset the value managers for v in self._values.values(): diff --git a/cqlengine/query.py b/cqlengine/query.py index 0f042dbf53..e85771b568 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -409,11 +409,12 @@ def transaction(self, *args, **kwargs): clone._transaction.append(operator) for col_name, val in kwargs.items(): + exists = False try: column = self.model._get_column(col_name) except 
KeyError: if col_name in ['exists', 'not_exists']: - pass + exists = True elif col_name == 'pk__token': if not isinstance(val, Token): raise QueryException("Virtual column 'pk__token' may only be compared to Token() values") @@ -432,7 +433,7 @@ def transaction(self, *args, **kwargs): len(val.value), len(partition_columns))) val.set_columns(partition_columns) - if isinstance(val, BaseQueryFunction): + if isinstance(val, BaseQueryFunction) or exists is True: query_val = val else: query_val = column.to_database(val) @@ -615,9 +616,14 @@ def defer(self, fields): return self._only_or_defer('defer', fields) def create(self, **kwargs): - return self.model(**kwargs).batch(self._batch).ttl(self._ttl).\ + + x = self.model(**kwargs).batch(self._batch) + x = x.ttl(self._ttl).\ consistency(self._consistency).\ - timestamp(self._timestamp).save() + timestamp(self._timestamp) + x = x.transaction(self._transaction).save() + + return x def delete(self): """ @@ -815,7 +821,7 @@ class DMLQuery(object): _consistency = None _timestamp = None - def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None): + def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None, transaction=None): self.model = model self.column_family_name = self.model.column_family_name() self.instance = instance @@ -823,6 +829,7 @@ def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, self._ttl = ttl self._consistency = consistency self._timestamp = timestamp + self._transaction = transaction def _execute(self, q): if self._batch: @@ -874,7 +881,8 @@ def update(self): raise CQLEngineException("DML Query intance attribute is None") assert type(self.instance) == self.model static_update_only = True - statement = UpdateStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp) + statement = UpdateStatement(self.column_family_name, ttl=self._ttl, + timestamp=self._timestamp, 
transactions=self._transaction) #get defined fields and their column names for name, col in self.model._columns.items(): if not col.is_primary_key: @@ -937,7 +945,8 @@ def save(self): if self.instance._has_counter or self.instance._can_update(): return self.update() else: - insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp) + insert = InsertStatement(self.column_family_name, ttl=self._ttl, + timestamp=self._timestamp, transactions=self._transaction) for name, col in self.instance._columns.items(): val = getattr(self.instance, name, None) if col._val_is_null(val): diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 834db39179..e4a2aa5b77 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -152,6 +152,11 @@ def __unicode__(self): def insert_tuple(self): return self.field, self.context_id + def update_context(self, ctx): + if self.field not in ['exists', 'not_exists']: + return super(TransactionClause, self).update_context(ctx) + return ctx + class ContainerUpdateClause(AssignmentClause): @@ -646,10 +651,12 @@ def get_context(self): ctx = super(AssignmentStatement, self).get_context() for clause in self.assignments: clause.update_context(ctx) + for clause in self.transactions or []: + clause.update_context(ctx) return ctx - def _transactions(self): - return 'IF {}'.format(' AND '.join([six.text_type(c) for c in self.where_clauses])) + def _get_transactions(self): + return 'IF {}'.format(' AND '.join([six.text_type(c) for c in self.transactions])) class InsertStatement(AssignmentStatement): @@ -676,7 +683,7 @@ def __unicode__(self): qs += ["USING TIMESTAMP {}".format(self.timestamp_normalized)] if len(self.transactions) > 0: - qs += [self._transactions] + qs += [self._get_transactions()] return ' '.join(qs) @@ -705,7 +712,7 @@ def __unicode__(self): qs += [self._where] if len(self.transactions) > 0: - qs += [self._transactions] + qs += [self._get_transactions()] return ' '.join(qs) diff --git 
a/cqlengine/tests/model/test_transaction_statements.py b/cqlengine/tests/model/test_transaction_statements.py index 8afabb1ed2..a62ac122d3 100644 --- a/cqlengine/tests/model/test_transaction_statements.py +++ b/cqlengine/tests/model/test_transaction_statements.py @@ -4,10 +4,13 @@ from mock import patch from cqlengine.exceptions import ValidationError -from cqlengine.tests.base import BaseCassEngTestCase +from unittest import TestCase from cqlengine.models import Model from cqlengine import columns from cqlengine.management import sync_table, drop_table +from cqlengine import connection + +connection.setup(['192.168.56.103'], 'test') class TestUpdateModel(Model): @@ -18,7 +21,7 @@ class TestUpdateModel(Model): text = columns.Text(required=False, index=True) -class ModelUpdateTests(BaseCassEngTestCase): +class ModelUpdateTests(TestCase): @classmethod def setUpClass(cls): @@ -31,6 +34,12 @@ def tearDownClass(cls): drop_table(TestUpdateModel) def test_transaction_insertion(self): - m = TestUpdateModel(count=5, text='something').transaction(exists=True) - m.save() + m = TestUpdateModel.objects.create(count=5, text='something') + all_models = TestUpdateModel.objects.all() + for x in all_models: + pass + m = TestUpdateModel.objects + m = m.transaction(not_exists=True) + m = m.create(count=5, text='something') + m.transaction(count=6).update(text='something else') x = 10 \ No newline at end of file From dc2cdf629642bdaddbe94a2ed7d3a6ef9a17a52c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 8 Oct 2014 14:22:01 -0500 Subject: [PATCH 0974/3726] Handle CUSTOM index meta and string export --- cassandra/metadata.py | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 5a7e16c5a7..ec9062a712 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -339,7 +339,9 @@ def _build_index_metadata(self, column_metadata, row): index_name = row.get("index_name") index_type = 
row.get("index_type") if index_name or index_type: - return IndexMetadata(column_metadata, index_name, index_type) + options = row.get("index_options") + index_options = json.loads(options) if options else {} + return IndexMetadata(column_metadata, index_name, index_type, index_options) else: return None @@ -1058,21 +1060,33 @@ class IndexMetadata(object): index_type = None """ A string representing the type of index. """ - def __init__(self, column_metadata, index_name=None, index_type=None): + index_options = {} + """ A dict of index options. """ + + def __init__(self, column_metadata, index_name=None, index_type=None, index_options={}): self.column = column_metadata self.name = index_name self.index_type = index_type + self.index_options = index_options def as_cql_query(self): """ Returns a CQL query that can be used to recreate this index. """ table = self.column.table - return "CREATE INDEX %s ON %s.%s (%s)" % ( - self.name, # Cassandra doesn't like quoted index names for some reason - protect_name(table.keyspace.name), - protect_name(table.name), - protect_name(self.column.name)) + if self.index_type != "CUSTOM": + return "CREATE INDEX %s ON %s.%s (%s)" % ( + self.name, # Cassandra doesn't like quoted index names for some reason + protect_name(table.keyspace.name), + protect_name(table.name), + protect_name(self.column.name)) + else: + return "CREATE CUSTOM INDEX %s ON %s.%s (%s) USING '%s'" % ( + self.name, # Cassandra doesn't like quoted index names for some reason + protect_name(table.keyspace.name), + protect_name(table.name), + protect_name(self.column.name), + self.index_options["class_name"]) class TokenMap(object): @@ -1269,4 +1283,4 @@ def as_cql_query(self): protect_name(self.table.name), protect_value(self.options['class']) ) - return ret \ No newline at end of file + return ret From 9618dd984301553f3dac90d65ffb3ff2a36eca38 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 8 Oct 2014 14:29:39 -0500 Subject: [PATCH 0975/3726] Changelog 
catch-up --- CHANGELOG.rst | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f2cebe9744..500fbb6297 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,14 @@ +2.1.2 +===== +In Progress + +Bug Fixes +--------- +* Handle Unauthorized message on schema_triggers query (PYTHON-155) +* Make execute_concurrent compatible with Python 2.6 (github-197) +* Pure Python sorted set in support of UDTs nested in collections (PYTON-167) +* Support CUSTOM index metadata and string export (PYTHON-165) + 2.1.1 ===== September 11, 2014 From f9874c7787393dac617c00321ac58246610df4a4 Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Wed, 8 Oct 2014 17:11:19 -0400 Subject: [PATCH 0976/3726] Added a thrown exception when a transaction fails to be applied --- .gitignore | 3 +++ cqlengine/exceptions.py | 1 + cqlengine/query.py | 8 +++++++- 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index b43bb3d158..f576678427 100644 --- a/.gitignore +++ b/.gitignore @@ -45,3 +45,6 @@ docs/_build #iPython *.ipynb + +cqlengine/tests/* +cqlengine/tests* \ No newline at end of file diff --git a/cqlengine/exceptions.py b/cqlengine/exceptions.py index 94a51b92ac..4066305220 100644 --- a/cqlengine/exceptions.py +++ b/cqlengine/exceptions.py @@ -2,5 +2,6 @@ class CQLEngineException(Exception): pass class ModelException(CQLEngineException): pass class ValidationError(CQLEngineException): pass +class TransactionException(CQLEngineException): pass class UndefinedKeyspaceException(CQLEngineException): pass diff --git a/cqlengine/query.py b/cqlengine/query.py index e85771b568..406d416161 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -6,7 +6,7 @@ from cqlengine.connection import execute -from cqlengine.exceptions import CQLEngineException, ValidationError +from cqlengine.exceptions import CQLEngineException, ValidationError, TransactionException from cqlengine.functions import Token, BaseQueryFunction, 
QueryValue, UnicodeMixin #CQL 3 reference: @@ -836,6 +836,12 @@ def _execute(self, q): return self._batch.add_query(q) else: tmp = execute(q, consistency_level=self._consistency) + if tmp and tmp[0].get('[applied]', True) is False: + tmp[0].pop('[applied]') + expected = ', '.join('{0}={1}'.format(t.field, t.value) for t in q.transactions) + actual = ', '.join('{0}={1}'.format(f, v) for f, v in tmp[0].items()) + message = 'Transaction statement failed: Expected: {0} Actual: {1}'.format(expected, actual) + raise TransactionException(message) return tmp def batch(self, batch_obj): From 0488813522e47dd8b06e881f5351b00992036549 Mon Sep 17 00:00:00 2001 From: Irina Kaplounova Date: Wed, 8 Oct 2014 23:24:05 -0700 Subject: [PATCH 0977/3726] Added support for PROTOCOL_VERSION=1 --- .../standard/test_authentication.py | 119 ++++++++++-------- 1 file changed, 64 insertions(+), 55 deletions(-) diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index ee9e32a526..10574bb5a4 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -19,7 +19,7 @@ from ccmlib import common from ccmlib.cluster import Cluster as CCMCluster -from tests.integration import setup_package, teardown_package, PROTOCOL_VERSION, CASSANDRA_DIR, CASSANDRA_VERSION, path +import tests.integration from cassandra.cluster import Cluster, NoHostAvailable, Unauthorized, InvalidRequest from cassandra.auth import PlainTextAuthProvider @@ -68,17 +68,19 @@ def try_connecting(username='', password=''): :param password: optional password for connection :return: True if can successfully connect to cluster within 2 minutes, False otherwise """ + if username and password: - ap = PlainTextAuthProvider(username=username, password=password) + ap = AuthenticationTests.get_authentication_provider(username, password) else: ap = None + maxwait = 120 # in seconds sleeptime = 1 wait_time = 0 while wait_time < 
maxwait: try: - cluster = Cluster(protocol_version=PROTOCOL_VERSION, auth_provider=ap) + cluster = Cluster(protocol_version=tests.integration.PROTOCOL_VERSION, auth_provider=ap) cluster.connect() log.debug("Can connect after %d seconds" % wait_time) return True @@ -93,23 +95,15 @@ def try_connecting(username='', password=''): # Module level setup and teardown # def setup_module(): - """ - Test is skipped if run with cql version < 2 - Stop existing cluster started by generic tests init - """ - - if PROTOCOL_VERSION < 2: - raise unittest.SkipTest( - "SASL Authentication is not supported in version 1 of the protocol") - teardown_package() + tests.integration.teardown_package() def teardown_module(): """ Start generic tests cluster """ - setup_package() + tests.integration.setup_package() wait_for_cassandra() try_connecting() @@ -143,46 +137,42 @@ def setup_class(cls): If invoked from authorization class enable authorization by setting 'authorizer': 'CassandraAuthorizer'. """ try: - try: - ccm_cluster = CCMCluster.load(path, AUTH_CLUSTER_NAME) - log.debug("Found existing ccm test authentication cluster, removing") - ccm_cluster.remove() - except Exception: - log.debug("Can not load cluster %s ....." % AUTH_CLUSTER_NAME) - - log.debug("Creating new ccm test authentication cluster") - if CASSANDRA_DIR: - ccm_cluster = CCMCluster(path, AUTH_CLUSTER_NAME, cassandra_dir=CASSANDRA_DIR) - else: - ccm_cluster = CCMCluster(path, AUTH_CLUSTER_NAME, cassandra_version=CASSANDRA_VERSION) - - ccm_cluster.set_configuration_options({'start_native_transport': True}) - ccm_cluster.set_configuration_options({'authenticator': 'PasswordAuthenticator'}) - - # - # This method is called either with AuthenticationTests class or with AuthorizedAuthenticationTests class. 
- # In the second case we enable CassandraAuthorizer - # - if cls.__name__ == 'AuthorizedAuthenticationTests': - print "Running tests with Cassandra Authorizer Enabled" - log.info("Running tests with Cassandra Authorizer Enabled") - ccm_cluster.set_configuration_options({'authorizer': 'CassandraAuthorizer'}) - else: - print "Running tests with Cassandra Authorizer Disabled" - log.info("Running tests with Cassandra Authorizer Disabled") - - common.switch_cluster(path, AUTH_CLUSTER_NAME) - ccm_cluster.populate(1) - - log.debug("Starting ccm test authentication cluster") - ccm_cluster.start(wait_for_binary_proto=True) + ccm_cluster = CCMCluster.load(tests.integration.path, AUTH_CLUSTER_NAME) + log.debug("Found existing ccm test authentication cluster, removing") + ccm_cluster.remove() except Exception: - log.exception("Failed to start ccm authentication cluster:") - raise + log.debug("Can not load cluster %s ....." % AUTH_CLUSTER_NAME) + + log.debug("Creating new ccm test authentication cluster") + if tests.integration.CASSANDRA_DIR: + ccm_cluster = CCMCluster(tests.integration.path, AUTH_CLUSTER_NAME, cassandra_dir=tests.integration.CASSANDRA_DIR) + else: + ccm_cluster = CCMCluster(tests.integration.path, AUTH_CLUSTER_NAME, cassandra_version=tests.integration.CASSANDRA_VERSION) + + ccm_cluster.set_configuration_options({'start_native_transport': True}) + ccm_cluster.set_configuration_options({'authenticator': 'PasswordAuthenticator'}) + + # + # This method is called either with AuthenticationTests class or with AuthorizedAuthenticationTests class. 
+ # In the second case we enable CassandraAuthorizer + # + if cls.__name__ == 'AuthorizedAuthenticationTests': + print "Running tests with Cassandra Authorizer Enabled" + log.info("Running tests with Cassandra Authorizer Enabled") + ccm_cluster.set_configuration_options({'authorizer': 'CassandraAuthorizer'}) + else: + print "Running tests with Cassandra Authorizer Disabled" + log.info("Running tests with Cassandra Authorizer Disabled") + + common.switch_cluster(tests.integration.path, AUTH_CLUSTER_NAME) + ccm_cluster.populate(1) + + log.debug("Starting ccm test authentication cluster") + ccm_cluster.start(wait_for_binary_proto=True) if not wait_for_cassandra() or not try_connecting('cassandra', 'cassandra'): log.exception("Can not talk to cassandra") - raise + raise Exception('Can not talk to cassandra') log.debug("Switched to AUTH_CLUSTER_NAME cluster") cls.ccm_cluster = ccm_cluster @@ -218,6 +208,27 @@ def tearDown(self): def enable_authorizer(cls): cls.authorizer = True + @staticmethod + def v1_authentication_provider(username, password): + return dict(username=username, password=password) + + @staticmethod + def get_authentication_provider(username, password): + """ + Return correct authentication provider based on protocol version. + There is a difference in the semantics of authentication provider argument with protocol versions 1 and 2 + For protocol version 2 and higher it should be a PlainTextAuthProvider object. + For protocol version 1 it should be a function taking hostname as an argument and returning a dictionary + containing username and password. 
+ :param username: authentication username + :param password: authentication password + :return: authentication object suitable for Cluster.connect() + """ + if tests.integration.PROTOCOL_VERSION < 2: + return lambda(hostname): dict(username=username, password=password) + else: + return PlainTextAuthProvider(username=username, password=password) + @staticmethod def create_user(cluster, usr, pwd, su=False): """ @@ -227,7 +238,7 @@ def create_user(cluster, usr, pwd, su=False): if su: status = 'SUPERUSER' session = cluster.connect() - session.execute("CREATE USER IF NOT EXISTS %s WITH PASSWORD '%s' %s " % (usr, pwd, status)) + session.execute("CREATE USER %s WITH PASSWORD '%s' %s " % (usr, pwd, status)) session.shutdown() @staticmethod @@ -263,9 +274,8 @@ def cluster_as(usr, pwd): :param pwd: user password :return: CLuster object """ - return Cluster(protocol_version=PROTOCOL_VERSION, - auth_provider=PlainTextAuthProvider(username=usr, - password=pwd)) + return Cluster(protocol_version=tests.integration.PROTOCOL_VERSION, + auth_provider=AuthenticationTests.get_authentication_provider(username=usr, password=pwd)) def test_new_user_created(self): """ @@ -487,7 +497,7 @@ def test_su_connect_no_user_specified(self): su_connect_no_user_specified: can't connect with an empty user No user specified triggers "AuthenticationFailed /Remote end requires authentication """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cluster = Cluster(protocol_version=tests.integration.PROTOCOL_VERSION) self.assertRaisesRegexp(NoHostAvailable, '.*AuthenticationFailed.*Remote end requires authentication.*', cluster.connect) @@ -498,4 +508,3 @@ class AuthorizedAuthenticationTests(AuthenticationTests): Same test as AuthenticationTests but enables authorization """ pass - From 263481f95962a1ab554e9dd4f4e272cd98cf77b0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Oct 2014 15:33:43 -0500 Subject: [PATCH 0978/3726] Allow DCAware LB to default local_dc based on contact_points 
PYTHON-148 --- cassandra/cluster.py | 18 ++++++++++--- cassandra/policies.py | 59 +++++++++++++++++++++++++------------------ 2 files changed, 49 insertions(+), 28 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 54e1bd3e3b..4e099b1d72 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -166,6 +166,18 @@ class Cluster(object): """ + contact_points = ['127.0.0.1'] + """ + The list of contact points to try connecting for cluster discovery. + + Defaults to loopback interface. + + Note: When using :class:`.DCAwareLoadBalancingPolicy` with no explicit + local_dc set, the DC is chosen from an arbitrary host in contact_points. + In this case, contact_points should contain only nodes from a single, + local DC. + """ + port = 9042 """ The server-side port to open connections to. Defaults to 9042. @@ -377,7 +389,7 @@ def auth_provider(self, value): _listener_lock = None def __init__(self, - contact_points=("127.0.0.1",), + contact_points=["127.0.0.1"], port=9042, compression=True, auth_provider=None, @@ -1946,8 +1958,8 @@ def _handle_results(success, result): responses = connection.wait_for_responses(*queries, fail_on_error=False) (ks_success, ks_result), (cf_success, cf_result), \ - (col_success, col_result), (types_success, types_result), \ - (trigger_success, triggers_result) = responses + (col_success, col_result), (types_success, types_result), \ + (trigger_success, triggers_result) = responses if ks_success: ks_result = dict_factory(*ks_result.results) diff --git a/cassandra/policies.py b/cassandra/policies.py index d09b2b41e0..9e16870c4f 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -204,11 +204,14 @@ class DCAwareRoundRobinPolicy(LoadBalancingPolicy): local_dc = None used_hosts_per_remote_dc = 0 - def __init__(self, local_dc, used_hosts_per_remote_dc=0): + def __init__(self, local_dc='', used_hosts_per_remote_dc=0): """ The `local_dc` parameter should be the name of the datacenter (such as is reported by 
``nodetool ring``) that should - be considered local. + be considered local. If not specified, the driver will choose + a local_dc based on the first host among :attr:`.Cluster.contact_points` + having a valid DC. If relying on this mechanism, all specified + contact points should be nodes in a single, local DC. `used_hosts_per_remote_dc` controls how many nodes in each remote datacenter will have connections opened @@ -220,6 +223,8 @@ def __init__(self, local_dc, used_hosts_per_remote_dc=0): self.local_dc = local_dc self.used_hosts_per_remote_dc = used_hosts_per_remote_dc self._dc_live_hosts = {} + self._position = 0 + self._contact_points = [] LoadBalancingPolicy.__init__(self) def _dc(self, host): @@ -229,14 +234,10 @@ def populate(self, cluster, hosts): for dc, dc_hosts in groupby(hosts, lambda h: self._dc(h)): self._dc_live_hosts[dc] = tuple(set(dc_hosts)) - # position is currently only used for local hosts - local_live = self._dc_live_hosts.get(self.local_dc) - if not local_live: - self._position = 0 - elif len(local_live) == 1: - self._position = 0 - else: - self._position = randint(0, len(local_live) - 1) + if not self.local_dc: + self._contact_points = cluster.contact_points + + self._position = randint(0, len(hosts) - 1) if hosts else 0 def distance(self, host): dc = self._dc(host) @@ -274,32 +275,40 @@ def make_query_plan(self, working_keyspace=None, query=None): yield host def on_up(self, host): + + # not worrying about threads because this will happen during + # control connection startup/refresh + if not self.local_dc and host.datacenter: + if host.address in self._contact_points: + self.local_dc = host.datacenter + log.info("Using datacenter '%s' for DCAwareRoundRobinPolicy (via host '%s'); " + "if incorrect, please specify a local_dc to the constructor, " + "or limit contact points to local cluster nodes" % + (self.local_dc, host.address)) + del self._contact_points + dc = self._dc(host) with self._hosts_lock: - current_hosts = 
self._dc_live_hosts.setdefault(dc, ()) + current_hosts = self._dc_live_hosts.get(dc, ()) if host not in current_hosts: self._dc_live_hosts[dc] = current_hosts + (host, ) def on_down(self, host): dc = self._dc(host) with self._hosts_lock: - current_hosts = self._dc_live_hosts.setdefault(dc, ()) + current_hosts = self._dc_live_hosts.get(dc, ()) if host in current_hosts: - self._dc_live_hosts[dc] = tuple(h for h in current_hosts if h != host) + hosts = tuple(h for h in current_hosts if h != host) + if hosts: + self._dc_live_hosts[dc] = tuple(h for h in current_hosts if h != host) + else: + self._dc_live_hosts.pop(dc, None) def on_add(self, host): - dc = self._dc(host) - with self._hosts_lock: - current_hosts = self._dc_live_hosts.setdefault(dc, ()) - if host not in current_hosts: - self._dc_live_hosts[dc] = current_hosts + (host, ) + self.on_up(host) def on_remove(self, host): - dc = self._dc(host) - with self._hosts_lock: - current_hosts = self._dc_live_hosts.setdefault(dc, ()) - if host in current_hosts: - self._dc_live_hosts[dc] = tuple(h for h in current_hosts if h != host) + self.on_down(host) class TokenAwarePolicy(LoadBalancingPolicy): @@ -333,8 +342,8 @@ def check_supported(self): '%s cannot be used with the cluster partitioner (%s) because ' 'the relevant C extension for this driver was not compiled. ' 'See the installation instructions for details on building ' - 'and installing the C extensions.' % (self.__class__.__name__, - self._cluster_metadata.partitioner)) + 'and installing the C extensions.' 
% + (self.__class__.__name__, self._cluster_metadata.partitioner)) def distance(self, *args, **kwargs): return self._child_policy.distance(*args, **kwargs) From 0fded4feb23138b0e68407abc70be0fd9cbc743d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Oct 2014 16:31:52 -0500 Subject: [PATCH 0979/3726] Set routing key in BatchStatement.add() if none specified PYTHON-126 --- cassandra/query.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/cassandra/query.py b/cassandra/query.py index ffb5f8c234..1089d68e66 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -228,9 +228,9 @@ def _del_serial_consistency_level(self): self._serial_consistency_level = None serial_consistency_level = property( - _get_serial_consistency_level, - _set_serial_consistency_level, - _del_serial_consistency_level, + _get_serial_consistency_level, + _set_serial_consistency_level, + _del_serial_consistency_level, """ The serial consistency level is only used by conditional updates (``INSERT``, ``UPDATE`` and ``DELETE`` with an ``IF`` condition). 
For @@ -356,9 +356,8 @@ def from_message(cls, query_id, column_metadata, cluster_metadata, query, keyspa try: routing_key_indexes = [statement_indexes[c.name] for c in partition_key_columns] - except KeyError: - pass # we're missing a partition key component in the prepared - # statement; just leave routing_key_indexes as None + except KeyError: # we're missing a partition key component in the prepared + pass # statement; just leave routing_key_indexes as None return PreparedStatement(column_metadata, query_id, routing_key_indexes, query, keyspace, protocol_version) @@ -641,6 +640,7 @@ def add(self, statement, parameters=None): elif isinstance(statement, PreparedStatement): query_id = statement.query_id bound_statement = statement.bind(() if parameters is None else parameters) + self._maybe_set_routing_key(bound_statement) self._statements_and_parameters.append( (True, query_id, bound_statement.values)) elif isinstance(statement, BoundStatement): @@ -648,6 +648,7 @@ def add(self, statement, parameters=None): raise ValueError( "Parameters cannot be passed with a BoundStatement " "to BatchStatement.add()") + self._maybe_set_routing_key(statement) self._statements_and_parameters.append( (True, statement.prepared_statement.query_id, statement.values)) else: @@ -656,6 +657,7 @@ def add(self, statement, parameters=None): if parameters: encoder = Encoder() if self._session is None else self._session.encoder query_string = bind_params(query_string, parameters, encoder) + self._maybe_set_routing_key(statement) self._statements_and_parameters.append((False, query_string, ())) return self @@ -668,6 +670,10 @@ def add_all(self, statements, parameters): for statement, value in zip(statements, parameters): self.add(statement, parameters) + def _maybe_set_routing_key(self, statement): + if self.routing_key is None and statement.routing_key is not None: + self.routing_key = statement.routing_key + def __str__(self): consistency = 
ConsistencyLevel.value_to_name.get(self.consistency_level, 'Not Set') return (u'' % From a0ff7ff2727e3f60cdfbac67c7988068408e8372 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Oct 2014 16:52:22 -0500 Subject: [PATCH 0980/3726] More changelog catch-up --- CHANGELOG.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 500fbb6297..e349e82bae 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,13 @@ ===== In Progress +Features +-------- +* Allow DCAwareRoundRobinPolicy to be constructed without a local_dc, defaulting + instead to the DC of a contact_point (PYTHON-126) +* Set routing key in BatchStatement.add() if none specified in batch (PYTHON-148) +* Improved feedback on ValueError using named_tuple_factory with invalid column names (PYTHON-122) + Bug Fixes --------- * Handle Unauthorized message on schema_triggers query (PYTHON-155) From f8d183e25d8666f589b2e7c1fa48d0f3f0ffb7fb Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 10 Oct 2014 09:52:00 -0500 Subject: [PATCH 0981/3726] Error handling for bad column names in named tuple Emit warning with workarounds when named_tuple_factory is used with result column names that can't be used in namedtuple. Try to return results with renamed, positional column names. --- cassandra/query.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/cassandra/query.py b/cassandra/query.py index ffb5f8c234..1ada26335c 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -99,7 +99,17 @@ def named_tuple_factory(colnames, rows): .. 
versionchanged:: 2.0.0 moved from ``cassandra.decoder`` to ``cassandra.query`` """ - Row = namedtuple('Row', map(_clean_column_name, colnames)) + clean_column_names = map(_clean_column_name, colnames) + try: + Row = namedtuple('Row', clean_column_names) + except: + log.warn("Failed creating named tuple for results with column names %s (cleaned: %s) (see Python 'namedtuple' documentation for details on name rules). " + "Results will be returned with positional names. " + "Avoid this by choosing different names, using SELECT \"\" AS aliases, " + "or specifying a different row_factory on your Session" % + (colnames, clean_column_names)) + Row = namedtuple('Row', clean_column_names, rename=True) + return [Row(*row) for row in rows] From 6bfc0e8d2d64264f7c46ea4b0fd6009b5a59ebdb Mon Sep 17 00:00:00 2001 From: Paul McNett Date: Mon, 13 Oct 2014 05:59:35 -0700 Subject: [PATCH 0982/3726] Improve documentation of function with explicit args Makes it explicit that concurrency and raise_on_first_error are supported by this alternate function. --- cassandra/concurrent.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index f79f7f0be2..35247d95f1 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -54,7 +54,7 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais statements_and_params = [] for user_id in user_ids: params = (user_id, ) - statatements_and_params.append((select_statement, params)) + statements_and_params.append((select_statement, params)) results = execute_concurrent( session, statements_and_params, raise_on_first_error=False) @@ -73,7 +73,7 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais return [] # TODO handle iterators and generators naturally without converting the - # whole thing to a list. This would requires not building a result + # whole thing to a list. 
This would require not building a result # list of Nones up front (we don't know how many results there will be), # so a dict keyed by index should be used instead. The tricky part is # knowing when you're the final statement to finish. @@ -100,7 +100,7 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais return results -def execute_concurrent_with_args(session, statement, parameters, *args, **kwargs): +def execute_concurrent_with_args(session, statement, parameters, concurrency=100, raise_on_first_error=True): """ Like :meth:`~cassandra.concurrent.execute_concurrent()`, but takes a single statement and a sequence of parameters. Each item in ``parameters`` @@ -112,7 +112,7 @@ def execute_concurrent_with_args(session, statement, parameters, *args, **kwargs parameters = [(x,) for x in range(1000)] execute_concurrent_with_args(session, statement, parameters) """ - return execute_concurrent(session, list(zip(cycle((statement,)), parameters)), *args, **kwargs) + return execute_concurrent(session, list(zip(cycle((statement,)), parameters)), concurrency=concurrency, raise_on_first_error=True) _sentinel = object() From 98350bbd0f6ea6c3a869ddea414e691002bf57ff Mon Sep 17 00:00:00 2001 From: Paul McNett Date: Mon, 13 Oct 2014 08:07:39 -0700 Subject: [PATCH 0983/3726] Trivial: fix typos; improve docstring example It's not explicit that the alternate function accepts all the arguments of execute_concurrent until looking at the source code. I think that adding the concurrency=50 argument to the example in the docstring satisfies this. My first idea was to explicitly name the concurrency and exit_on_first_error args, but then we'd have to maintain two functions in the future instead of of one. 
--- cassandra/concurrent.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index 35247d95f1..93b954ffa3 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -100,7 +100,7 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais return results -def execute_concurrent_with_args(session, statement, parameters, concurrency=100, raise_on_first_error=True): +def execute_concurrent_with_args(session, statement, parameters, *args, **kwargs): """ Like :meth:`~cassandra.concurrent.execute_concurrent()`, but takes a single statement and a sequence of parameters. Each item in ``parameters`` @@ -110,9 +110,9 @@ def execute_concurrent_with_args(session, statement, parameters, concurrency=100 statement = session.prepare("INSERT INTO mytable (a, b) VALUES (1, ?)") parameters = [(x,) for x in range(1000)] - execute_concurrent_with_args(session, statement, parameters) + execute_concurrent_with_args(session, statement, parameters, concurrency=50) """ - return execute_concurrent(session, list(zip(cycle((statement,)), parameters)), concurrency=concurrency, raise_on_first_error=True) + return execute_concurrent(session, list(zip(cycle((statement,)), parameters)), *args, **kwargs) _sentinel = object() From 387fa14df0198d89ba47cc29e7f05b4cceacc265 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 13 Oct 2014 12:32:00 -0500 Subject: [PATCH 0984/3726] Use alredy generated hosts list in assignment. 
Code review tweaks --- cassandra/policies.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 9e16870c4f..94363db931 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -275,7 +275,6 @@ def make_query_plan(self, working_keyspace=None, query=None): yield host def on_up(self, host): - # not worrying about threads because this will happen during # control connection startup/refresh if not self.local_dc and host.datacenter: @@ -300,9 +299,9 @@ def on_down(self, host): if host in current_hosts: hosts = tuple(h for h in current_hosts if h != host) if hosts: - self._dc_live_hosts[dc] = tuple(h for h in current_hosts if h != host) + self._dc_live_hosts[dc] = hosts else: - self._dc_live_hosts.pop(dc, None) + del self._dc_live_hosts[dc] def on_add(self, host): self.on_up(host) From 99b4872ba503409e8dc8a0b720fc454364c73f97 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 13 Oct 2014 13:11:17 -0500 Subject: [PATCH 0985/3726] Copy internal ist for sortedset::copy --- cassandra/util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/util.py b/cassandra/util.py index 5c078c6f8d..d02219ffdc 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -458,7 +458,7 @@ def clear(self): def copy(self): new = sortedset() - new._items = self._items + new._items = list(self._items) return new def isdisjoint(self, other): From a7b48ca6e67f748bdbac6f8e0fff6e9f7c1de40d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 14 Oct 2014 20:20:43 -0500 Subject: [PATCH 0986/3726] Normalize test class names. 
--- tests/unit/test_marshalling.py | 2 +- tests/unit/test_metadata.py | 10 +++++----- tests/unit/test_policies.py | 4 ++-- tests/unit/test_sortedset.py | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index 18dbf2f8a5..05239b4798 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -100,7 +100,7 @@ marshalled_value_pairs += marshalled_value_pairs_unsafe -class TestUnmarshal(unittest.TestCase): +class UnmarshalTest(unittest.TestCase): def test_unmarshalling(self): for serializedval, valtype, nativeval in marshalled_value_pairs: unmarshaller = lookup_casstype(valtype) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 6c35a620ce..912042bd19 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -31,7 +31,7 @@ from cassandra.pool import Host -class TestStrategies(unittest.TestCase): +class StrategiesTest(unittest.TestCase): @classmethod def setUpClass(cls): @@ -161,7 +161,7 @@ def test_ss_equals(self): self.assertNotEqual(SimpleStrategy(1), NetworkTopologyStrategy({'dc1': 2})) -class TestNameEscaping(unittest.TestCase): +class NameEscapingTest(unittest.TestCase): def test_protect_name(self): """ @@ -221,7 +221,7 @@ def test_is_valid_name(self): self.assertEqual(is_valid_name(keyword), False) -class TestTokens(unittest.TestCase): +class TokensTest(unittest.TestCase): def test_murmur3_tokens(self): try: @@ -255,7 +255,7 @@ def test_bytes_tokens(self): pass -class TestKeyspaceMetadata(unittest.TestCase): +class KeyspaceMetadataTest(unittest.TestCase): def test_export_as_string_user_types(self): keyspace_name = 'test' @@ -297,7 +297,7 @@ def mock_user_type(self, cassname, typename): return Mock(**{'cassname': cassname, 'typename': typename, 'cql_parameterized_type.return_value': typename}) -class TestUserTypes(unittest.TestCase): +class UserTypesTest(unittest.TestCase): def test_as_cql_query(self): field_types 
= [IntegerType, AsciiType, TupleType.apply_parameters([IntegerType, AsciiType])] diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 7ecac04a52..f66faf7f9f 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -40,7 +40,7 @@ from six.moves import xrange -class TestLoadBalancingPolicy(unittest.TestCase): +class LoadBalancingPolicyTest(unittest.TestCase): def test_non_implemented(self): """ Code coverage for interface-style base class @@ -62,7 +62,7 @@ def test_instance_check(self): self.assertRaises(TypeError, Cluster, load_balancing_policy=RoundRobinPolicy) -class TestRoundRobinPolicy(unittest.TestCase): +class RoundRobinPolicyTest(unittest.TestCase): def test_basic(self): hosts = [0, 1, 2, 3] diff --git a/tests/unit/test_sortedset.py b/tests/unit/test_sortedset.py index 159f5a257e..161fe56b92 100644 --- a/tests/unit/test_sortedset.py +++ b/tests/unit/test_sortedset.py @@ -21,7 +21,7 @@ from cassandra.cqltypes import EMPTY -class TestSortedSet(unittest.TestCase): +class SortedSetTest(unittest.TestCase): def test_init(self): input = [5, 4, 3, 2, 1, 1, 1] expected = sorted(set(input)) From a639afa6bf71b4b83ee520d0e7aaab7d5d8f2a3b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 14 Oct 2014 20:22:58 -0500 Subject: [PATCH 0987/3726] Remove superfluous mock spec arguments for DCA LBP --- tests/unit/test_policies.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index f66faf7f9f..8b65a9df19 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -202,14 +202,14 @@ def test_with_remotes(self): # allow all of the remote hosts to be used policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=2) - policy.populate(Mock(spec=Metadata), hosts) + policy.populate(Mock(), hosts) qplan = list(policy.make_query_plan()) self.assertEqual(set(qplan[:2]), local_hosts) self.assertEqual(set(qplan[2:]), 
remote_hosts) # allow only one of the remote hosts to be used policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1) - policy.populate(Mock(spec=Metadata), hosts) + policy.populate(Mock(), hosts) qplan = list(policy.make_query_plan()) self.assertEqual(set(qplan[:2]), local_hosts) @@ -219,7 +219,7 @@ def test_with_remotes(self): # allow no remote hosts to be used policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0) - policy.populate(Mock(spec=Metadata), hosts) + policy.populate(Mock(), hosts) qplan = list(policy.make_query_plan()) self.assertEqual(2, len(qplan)) self.assertEqual(local_hosts, set(qplan)) @@ -228,7 +228,7 @@ def test_get_distance(self): policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0) host = Host("ip1", SimpleConvictionPolicy) host.set_location_info("dc1", "rack1") - policy.populate(Mock(spec=Metadata), [host]) + policy.populate(Mock(), [host]) self.assertEqual(policy.distance(host), HostDistance.LOCAL) @@ -242,14 +242,14 @@ def test_get_distance(self): self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED) # make sure the policy has both dcs registered - policy.populate(Mock(spec=Metadata), [host, remote_host]) + policy.populate(Mock(), [host, remote_host]) self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE) # since used_hosts_per_remote_dc is set to 1, only the first # remote host in dc2 will be REMOTE, the rest are IGNORED second_remote_host = Host("ip3", SimpleConvictionPolicy) second_remote_host.set_location_info("dc2", "rack1") - policy.populate(Mock(spec=Metadata), [host, remote_host, second_remote_host]) + policy.populate(Mock(), [host, remote_host, second_remote_host]) distances = set([policy.distance(remote_host), policy.distance(second_remote_host)]) self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED])) @@ -261,7 +261,7 @@ def test_status_updates(self): h.set_location_info("dc2", "rack1") policy = DCAwareRoundRobinPolicy("dc1", 
used_hosts_per_remote_dc=1) - policy.populate(Mock(spec=Metadata), hosts) + policy.populate(Mock(), hosts) policy.on_down(hosts[0]) policy.on_remove(hosts[2]) @@ -303,7 +303,7 @@ def test_no_live_nodes(self): hosts.append(h) policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1) - policy.populate(Mock(spec=Metadata), hosts) + policy.populate(Mock(), hosts) for host in hosts: policy.on_down(host) From 5ca7eab0ae64bac096be3c47c0c4bf68c723f300 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 14 Oct 2014 21:19:00 -0500 Subject: [PATCH 0988/3726] Test for DCAware default DC (PYTHON-148) --- tests/unit/test_policies.py | 39 +++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 8b65a9df19..7fb99b8ceb 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -322,6 +322,45 @@ def test_no_nodes(self): qplan = list(policy.make_query_plan()) self.assertEqual(qplan, []) + def test_default_dc(self): + host_local = Host(1, SimpleConvictionPolicy, 'local') + host_remote = Host(2, SimpleConvictionPolicy, 'remote') + host_none = Host(1, SimpleConvictionPolicy) + + # contact point is '1' + cluster = Mock(contact_points=[1]) + + # contact DC first + policy = DCAwareRoundRobinPolicy() + policy.populate(cluster, [host_none]) + self.assertFalse(policy.local_dc) + policy.on_add(host_local) + policy.on_add(host_remote) + self.assertNotEqual(policy.local_dc, host_remote.datacenter) + self.assertEqual(policy.local_dc, host_local.datacenter) + + # contact DC second + policy = DCAwareRoundRobinPolicy() + policy.populate(cluster, [host_none]) + self.assertFalse(policy.local_dc) + policy.on_add(host_remote) + policy.on_add(host_local) + self.assertNotEqual(policy.local_dc, host_remote.datacenter) + self.assertEqual(policy.local_dc, host_local.datacenter) + + # no DC + policy = DCAwareRoundRobinPolicy() + policy.populate(cluster, [host_none]) + 
self.assertFalse(policy.local_dc) + policy.on_add(host_none) + self.assertFalse(policy.local_dc) + + # only other DC + policy = DCAwareRoundRobinPolicy() + policy.populate(cluster, [host_none]) + self.assertFalse(policy.local_dc) + policy.on_add(host_remote) + self.assertFalse(policy.local_dc) class TokenAwarePolicyTest(unittest.TestCase): From 4fe15f6a6f08efdeef04d5f0a897b0f98bd565f8 Mon Sep 17 00:00:00 2001 From: Irina Kaplounova Date: Tue, 14 Oct 2014 20:19:21 -0700 Subject: [PATCH 0989/3726] Test for PYTHON-122 --- tests/integration/standard/test_query.py | 52 +++++++++++++++++++++++- 1 file changed, 50 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index cacda2a2ae..c9a22ec3be 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -306,7 +306,6 @@ def test_no_parameters(self): class SerialConsistencyTests(unittest.TestCase): - def setUp(self): if PROTOCOL_VERSION < 2: raise unittest.SkipTest( @@ -359,7 +358,6 @@ def test_bad_consistency_level(self): class LightweightTransactionsTests(unittest.TestCase): - def setUp(self): """ Test is skipped if run with cql version < 2 @@ -423,3 +421,53 @@ def test_no_connection_refused_on_timeout(self): # Make sure test passed self.assertTrue(received_timeout) + + +class NamedTupleFactoryAndNumericColumnNames(unittest.TestCase): + """ + Test for PYTHON-122: Improve Error Handling/Reporting for named_tuple_factory and Numeric Column Names + """ + def setUp(self): + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + + ddl = ''' + CREATE TABLE test3rf.ToBeIndexed ( key blob PRIMARY KEY, "626972746864617465" blob ) + WITH COMPACT STORAGE''' + + self.session.execute(ddl) + + def tearDown(self): + """ + Shutdown cluster + """ + self.session.execute("DROP TABLE test3rf.ToBeIndexed") + self.cluster.shutdown() + + def test_no_exception_on_select(self): + """ + 
no exception on SELECT for numeric column name + """ + try: + self.session.execute('SELECT * FROM test3rf.ToBeIndexed') + except ValueError as e: + self.fail("Unexpected ValueError exception: %s" % e.message) + + def test_can_select_using_alias(self): + """ + can SELECT "" AS aliases + """ + try: + self.session.execute('SELECT key, "626972746864617465" AS my_col from test3rf.ToBeIndexed') + except ValueError as e: + self.fail("Unexpected ValueError exception: %s" % e.message) + + def test_can_select_with_dict_factory(self): + """ + can SELECT numeric column using dict_factory + """ + self.session.row_factory = dict_factory + try: + self.session.execute('SELECT * FROM test3rf.ToBeIndexed') + except ValueError as e: + self.fail("Unexpected ValueError exception: %s" % e.message) From 423dccd6fa544a38c52982997b1c7e572331c48d Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Wed, 15 Oct 2014 09:31:05 -0400 Subject: [PATCH 0990/3726] Reverted tests to original --- cqlengine/VERSION | 2 +- cqlengine/tests/management/test_management.py | 48 +++++++++++++------ .../model/test_transaction_statements.py | 45 ----------------- setup.py | 4 +- 4 files changed, 36 insertions(+), 63 deletions(-) delete mode 100644 cqlengine/tests/model/test_transaction_statements.py diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 249afd517d..503a21deb4 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.18.1 +0.18.2 diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index c2fb19afc2..3bda88ed9d 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -8,8 +8,9 @@ from cqlengine.tests.query.test_queryset import TestModel from cqlengine.models import Model from cqlengine import columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy - - +from unittest import skipUnless +from cqlengine.connection import get_cluster +cluster = get_cluster() class 
CreateKeyspaceTest(BaseCassEngTestCase): def test_create_succeeeds(self): @@ -145,22 +146,25 @@ def test_set_table_properties(self): sync_table(ModelWithTableProperties) expected = {'bloom_filter_fp_chance': 0.76328, - 'caching': CACHING_ALL, 'comment': 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR', 'gc_grace_seconds': 2063, - 'populate_io_cache_on_flush': True, 'read_repair_chance': 0.17985, - 'replicate_on_write': False # For some reason 'dclocal_read_repair_chance' in CQL is called # just 'local_read_repair_chance' in the schema table. # Source: https://issues.apache.org/jira/browse/CASSANDRA-6717 # TODO: due to a bug in the native driver i'm not seeing the local read repair chance show up # 'local_read_repair_chance': 0.50811, } + if CASSANDRA_VERSION <= 20: + expected['caching'] = CACHING_ALL + expected['replicate_on_write'] = False + + if CASSANDRA_VERSION == 20: + expected['populate_io_cache_on_flush'] = True + expected['index_interval'] = 98706 if CASSANDRA_VERSION >= 20: expected['default_time_to_live'] = 4756 - expected['index_interval'] = 98706 expected['memtable_flush_period_in_ms'] = 43681 self.assertDictContainsSubset(expected, management.get_table_settings(ModelWithTableProperties).options) @@ -186,19 +190,24 @@ def test_table_property_update(self): table_settings = management.get_table_settings(ModelWithTableProperties).options expected = {'bloom_filter_fp_chance': 0.66778, - 'caching': CACHING_NONE, 'comment': 'xirAkRWZVVvsmzRvXamiEcQkshkUIDINVJZgLYSdnGHweiBrAiJdLJkVohdRy', 'gc_grace_seconds': 96362, - 'populate_io_cache_on_flush': False, 'read_repair_chance': 0.2989, - 'replicate_on_write': True # TODO see above comment re: native driver missing local read repair chance - # 'local_read_repair_chance': 0.12732, + #'local_read_repair_chance': 0.12732, } if CASSANDRA_VERSION >= 20: expected['memtable_flush_period_in_ms'] = 60210 expected['default_time_to_live'] = 65178 + + if CASSANDRA_VERSION == 20: expected['index_interval'] = 94207 + # these featuers 
removed in cassandra 2.1 + if CASSANDRA_VERSION <= 20: + expected['caching'] = CACHING_NONE + expected['replicate_on_write'] = True + expected['populate_io_cache_on_flush'] = False + self.assertDictContainsSubset(expected, table_settings) @@ -247,6 +256,7 @@ def test_failure(self): sync_table(self.FakeModel) +@skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") def test_static_columns(): class StaticModel(Model): id = columns.Integer(primary_key=True) @@ -260,15 +270,23 @@ class StaticModel(Model): from cqlengine.connection import get_session session = get_session() - with patch.object(session, "execute", side_effect=Exception) as m: - try: - sync_table(StaticModel) - except: - pass + with patch.object(session, "execute", wraps=session.execute) as m: + sync_table(StaticModel) assert m.call_count > 0 statement = m.call_args[0][0].query_string assert '"name" text static' in statement, statement + # if we sync again, we should not apply an alter w/ a static + sync_table(StaticModel) + + with patch.object(session, "execute", wraps=session.execute) as m2: + sync_table(StaticModel) + + assert len(m2.call_args_list) == 1 + assert "ALTER" not in m2.call_args[0][0].query_string + + + diff --git a/cqlengine/tests/model/test_transaction_statements.py b/cqlengine/tests/model/test_transaction_statements.py deleted file mode 100644 index a62ac122d3..0000000000 --- a/cqlengine/tests/model/test_transaction_statements.py +++ /dev/null @@ -1,45 +0,0 @@ -__author__ = 'Tim Martin' -from uuid import uuid4 - -from mock import patch -from cqlengine.exceptions import ValidationError - -from unittest import TestCase -from cqlengine.models import Model -from cqlengine import columns -from cqlengine.management import sync_table, drop_table -from cqlengine import connection - -connection.setup(['192.168.56.103'], 'test') - - -class TestUpdateModel(Model): - __keyspace__ = 'test' - partition = columns.UUID(primary_key=True, default=uuid4) - cluster = 
columns.UUID(primary_key=True, default=uuid4) - count = columns.Integer(required=False) - text = columns.Text(required=False, index=True) - - -class ModelUpdateTests(TestCase): - - @classmethod - def setUpClass(cls): - super(ModelUpdateTests, cls).setUpClass() - sync_table(TestUpdateModel) - - @classmethod - def tearDownClass(cls): - super(ModelUpdateTests, cls).tearDownClass() - drop_table(TestUpdateModel) - - def test_transaction_insertion(self): - m = TestUpdateModel.objects.create(count=5, text='something') - all_models = TestUpdateModel.objects.all() - for x in all_models: - pass - m = TestUpdateModel.objects - m = m.transaction(not_exists=True) - m = m.create(count=5, text='something') - m.transaction(count=6).update(text='something else') - x = 10 \ No newline at end of file diff --git a/setup.py b/setup.py index cf35583c32..0b9a9af35e 100644 --- a/setup.py +++ b/setup.py @@ -17,7 +17,7 @@ """ setup( - name='cqlengine', + name='vk-cqlengine', version=version, description='Cassandra CQL 3 Object Mapper for Python', long_description=long_desc, @@ -35,7 +35,7 @@ author='Blake Eggleston, Jon Haddad', author_email='bdeggleston@gmail.com, jon@jonhaddad.com', url='https://github.com/cqlengine/cqlengine', - license='BSD', + license='Private', packages=find_packages(), include_package_data=True, ) From 7a1646a1854c17aa12b9e443a9e4302b593d18b2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 15 Oct 2014 08:41:34 -0500 Subject: [PATCH 0991/3726] Normalize standard integration test class names. 
--- tests/integration/standard/test_connection.py | 6 +++--- tests/integration/standard/test_factories.py | 2 +- tests/integration/standard/test_metadata.py | 2 +- tests/integration/standard/test_query.py | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 0d3b1bfbdb..243f1edb55 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -35,7 +35,7 @@ LibevConnection = None -class ConnectionTest(object): +class ConnectionTests(object): klass = None @@ -221,7 +221,7 @@ def send_msgs(conn, event): t.join() -class AsyncoreConnectionTest(ConnectionTest, unittest.TestCase): +class AsyncoreConnectionTests(ConnectionTests, unittest.TestCase): klass = AsyncoreConnection @@ -231,7 +231,7 @@ def setUp(self): ConnectionTest.setUp(self) -class LibevConnectionTest(ConnectionTest, unittest.TestCase): +class LibevConnectionTests(ConnectionTests, unittest.TestCase): klass = LibevConnection diff --git a/tests/integration/standard/test_factories.py b/tests/integration/standard/test_factories.py index 87b5fd6e73..ef15ca99d8 100644 --- a/tests/integration/standard/test_factories.py +++ b/tests/integration/standard/test_factories.py @@ -24,7 +24,7 @@ from cassandra.util import OrderedDict -class TestFactories(unittest.TestCase): +class RowFactoryTests(unittest.TestCase): """ Test different row_factories and access code """ diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 72bb847700..635490b744 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -31,7 +31,7 @@ from tests.integration import get_cluster, PROTOCOL_VERSION, get_server_versions -class SchemaMetadataTest(unittest.TestCase): +class SchemaMetadataTests(unittest.TestCase): ksname = "schemametadatatest" diff --git 
a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index c9a22ec3be..58ae729dcf 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -30,7 +30,7 @@ from tests.integration import PROTOCOL_VERSION -class QueryTest(unittest.TestCase): +class QueryTests(unittest.TestCase): def test_query(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) @@ -357,7 +357,7 @@ def test_bad_consistency_level(self): self.assertRaises(ValueError, SimpleStatement, 'foo', serial_consistency_level=ConsistencyLevel.ONE) -class LightweightTransactionsTests(unittest.TestCase): +class LightweightTransactionTests(unittest.TestCase): def setUp(self): """ Test is skipped if run with cql version < 2 @@ -423,7 +423,7 @@ def test_no_connection_refused_on_timeout(self): self.assertTrue(received_timeout) -class NamedTupleFactoryAndNumericColumnNames(unittest.TestCase): +class NamedTupleFactoryAndNumericColNamesTests(unittest.TestCase): """ Test for PYTHON-122: Improve Error Handling/Reporting for named_tuple_factory and Numeric Column Names """ From 073f4a155b83a3edbd63e5114da2d82cbb5cafa3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 15 Oct 2014 08:47:16 -0500 Subject: [PATCH 0992/3726] Move NamedTupleFactoryAndNumericColNamesTests to row_factories tests. 
--- tests/integration/standard/test_query.py | 50 ------------------- ...est_factories.py => test_row_factories.py} | 50 +++++++++++++++++++ 2 files changed, 50 insertions(+), 50 deletions(-) rename tests/integration/standard/{test_factories.py => test_row_factories.py} (70%) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 58ae729dcf..7ed03dc18a 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -421,53 +421,3 @@ def test_no_connection_refused_on_timeout(self): # Make sure test passed self.assertTrue(received_timeout) - - -class NamedTupleFactoryAndNumericColNamesTests(unittest.TestCase): - """ - Test for PYTHON-122: Improve Error Handling/Reporting for named_tuple_factory and Numeric Column Names - """ - def setUp(self): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - self.session = self.cluster.connect() - - ddl = ''' - CREATE TABLE test3rf.ToBeIndexed ( key blob PRIMARY KEY, "626972746864617465" blob ) - WITH COMPACT STORAGE''' - - self.session.execute(ddl) - - def tearDown(self): - """ - Shutdown cluster - """ - self.session.execute("DROP TABLE test3rf.ToBeIndexed") - self.cluster.shutdown() - - def test_no_exception_on_select(self): - """ - no exception on SELECT for numeric column name - """ - try: - self.session.execute('SELECT * FROM test3rf.ToBeIndexed') - except ValueError as e: - self.fail("Unexpected ValueError exception: %s" % e.message) - - def test_can_select_using_alias(self): - """ - can SELECT "" AS aliases - """ - try: - self.session.execute('SELECT key, "626972746864617465" AS my_col from test3rf.ToBeIndexed') - except ValueError as e: - self.fail("Unexpected ValueError exception: %s" % e.message) - - def test_can_select_with_dict_factory(self): - """ - can SELECT numeric column using dict_factory - """ - self.session.row_factory = dict_factory - try: - self.session.execute('SELECT * FROM test3rf.ToBeIndexed') - except 
ValueError as e: - self.fail("Unexpected ValueError exception: %s" % e.message) diff --git a/tests/integration/standard/test_factories.py b/tests/integration/standard/test_row_factories.py similarity index 70% rename from tests/integration/standard/test_factories.py rename to tests/integration/standard/test_row_factories.py index ef15ca99d8..189a0a9d15 100644 --- a/tests/integration/standard/test_factories.py +++ b/tests/integration/standard/test_row_factories.py @@ -137,3 +137,53 @@ def test_ordered_dict_factory(self): self.assertEqual(result[0]['k'], 1) self.assertEqual(result[1]['k'], result[1]['v']) self.assertEqual(result[1]['k'], 2) + + +class NamedTupleFactoryAndNumericColNamesTests(unittest.TestCase): + """ + Test for PYTHON-122: Improve Error Handling/Reporting for named_tuple_factory and Numeric Column Names + """ + def setUp(self): + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + + ddl = ''' + CREATE TABLE test3rf.ToBeIndexed ( key blob PRIMARY KEY, "626972746864617465" blob ) + WITH COMPACT STORAGE''' + + self.session.execute(ddl) + + def tearDown(self): + """ + Shutdown cluster + """ + self.session.execute("DROP TABLE test3rf.ToBeIndexed") + self.cluster.shutdown() + + def test_no_exception_on_select(self): + """ + no exception on SELECT for numeric column name + """ + try: + self.session.execute('SELECT * FROM test3rf.ToBeIndexed') + except ValueError as e: + self.fail("Unexpected ValueError exception: %s" % e.message) + + def test_can_select_using_alias(self): + """ + can SELECT "" AS aliases + """ + try: + self.session.execute('SELECT key, "626972746864617465" AS my_col from test3rf.ToBeIndexed') + except ValueError as e: + self.fail("Unexpected ValueError exception: %s" % e.message) + + def test_can_select_with_dict_factory(self): + """ + can SELECT numeric column using dict_factory + """ + self.session.row_factory = dict_factory + try: + self.session.execute('SELECT * FROM test3rf.ToBeIndexed') 
+ except ValueError as e: + self.fail("Unexpected ValueError exception: %s" % e.message) From 1df07ce1519bf9b0eff9baeb33a4a31338944df8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 15 Oct 2014 09:21:23 -0500 Subject: [PATCH 0993/3726] Add test for CUSTOM index meta (PYTHON-165) --- tests/unit/test_metadata.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 912042bd19..a4d6af0ba4 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -26,7 +26,7 @@ NetworkTopologyStrategy, SimpleStrategy, LocalStrategy, NoMurmur3, protect_name, protect_names, protect_value, is_valid_name, - UserType, KeyspaceMetadata) + UserType, KeyspaceMetadata, Metadata) from cassandra.policies import SimpleConvictionPolicy from cassandra.pool import Host @@ -313,3 +313,24 @@ def test_as_cql_query(self): def test_as_cql_query_name_escaping(self): udt = UserType("MyKeyspace", "MyType", ["AbA", "keyspace"], [AsciiType, AsciiType]) self.assertEqual('CREATE TYPE "MyKeyspace"."MyType" ("AbA" ascii, "keyspace" ascii);', udt.as_cql_query(formatted=False)) + + +class IndexTest(unittest.TestCase): + + def test_build_index_as_cql(self): + column_meta = Mock() + column_meta.name = 'column_name_here' + column_meta.table.name = 'table_name_here' + column_meta.table.keyspace.name = 'keyspace_name_here' + meta_model = Metadata(Mock()) + + row = {'index_name': 'index_name_here', 'index_type': 'index_type_here'} + index_meta = meta_model._build_index_metadata(column_meta, row) + self.assertEqual(index_meta.as_cql_query(), + 'CREATE INDEX index_name_here ON keyspace_name_here.table_name_here (column_name_here)') + + row['index_options'] = '{ "class_name": "class_name_here" }' + row['index_type'] = 'CUSTOM' + index_meta = meta_model._build_index_metadata(column_meta, row) + self.assertEqual(index_meta.as_cql_query(), + "CREATE CUSTOM INDEX index_name_here ON 
keyspace_name_here.table_name_here (column_name_here) USING 'class_name_here'") From 4ece815c29b6f28de901c8b7241e2559e3a59237 Mon Sep 17 00:00:00 2001 From: Irina Kaplounova Date: Wed, 15 Oct 2014 11:21:19 -0700 Subject: [PATCH 0994/3726] Test for PYTHON-126 --- tests/integration/standard/test_query.py | 152 +++++++++++++++++++++++ 1 file changed, 152 insertions(+) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index cacda2a2ae..f1d716498e 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -423,3 +423,155 @@ def test_no_connection_refused_on_timeout(self): # Make sure test passed self.assertTrue(received_timeout) + +class BatchStatementAdd_And_RoutingKey(unittest.TestCase): + # Test for PYTHON-126: BatchStatement.add() should set the routing key of the first added prepared statement + + def setUp(self): + if PROTOCOL_VERSION < 2: + raise unittest.SkipTest( + "Protocol 2.0+ is required for BATCH operations, currently testing against %r" + % (PROTOCOL_VERSION,)) + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + + def tearDown(self): + self.cluster.shutdown() + + def test_rk_from_BoundStatement(self): + """ + batch routing key is inherited from BoundStatement + """ + + prepared = self.session.prepare( + """ + INSERT INTO test3rf.test (k, v) VALUES (?, ?) + """) + + bound = prepared.bind((1, None)) + batch = BatchStatement() + batch.add(bound) + self.assertEqual(batch.routing_key, bound.routing_key) + + def test_rk_set_inBoundStatement(self): + """ + batch routing key is inherited when set explicitly in BoundStatement + """ + + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + + prepared = self.session.prepare( + """ + INSERT INTO test3rf.test (k, v) VALUES (?, ?) 
+ """) + bound = prepared.bind((1, None)) + bound._set_routing_key('fake_key') + + batch = BatchStatement() + batch.add(bound) + self.assertEqual(batch.routing_key, bound.routing_key) + + def test_rk_from_SimpleStatement(self): + """ + batch routing key is inherited from SimpleStatement + """ + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + + query = """ + INSERT INTO test1rf.test (k, v) VALUES (?, ?) + """ + statement = SimpleStatement(query) + statement._set_routing_key('another_fake_key') + batch = BatchStatement() + batch.add(statement) + self.assertEqual(batch.routing_key, statement.routing_key) + + def test_inherit_first_rk(self): + """ + compound batch inherits the first routing key of the first added statement (bound statement is first) + """ + + batch = BatchStatement() + + query = """ + INSERT INTO test1rf.test (k, v) VALUES (?, ?) + """ + statement = SimpleStatement(query) + statement._set_routing_key('another_fake_key') + + prepared = self.session.prepare( + """ + INSERT INTO test3rf.test (k, v) VALUES (?, ?) + """) + + bound = prepared.bind((1, None)) + bound._set_routing_key('fake_key') + + batch.add("INSERT INTO test3rf.test (k, v) VALUES (0, 0)") + batch.add(bound) + batch.add(statement) + + for i in range(3): + batch.add(prepared, (i, i)) + + self.assertEqual(batch.routing_key, 'fake_key') + + def test_inherit_first_rk_1(self): + """ + compound batch inherits the first routing key of the first added statement (Simplestatement is first) + """ + batch = BatchStatement() + + query = """ + INSERT INTO test1rf.test (k, v) VALUES (?, ?) + """ + statement = SimpleStatement(query) + statement._set_routing_key('another_fake_key') + + prepared = self.session.prepare( + """ + INSERT INTO test3rf.test (k, v) VALUES (?, ?) 
+ """) + + bound = prepared.bind((1, None)) + bound._set_routing_key('fake_key') + + batch.add("INSERT INTO test3rf.test (k, v) VALUES (0, 0)") + batch.add(statement) + batch.add(bound) + + for i in range(10): + batch.add(prepared, (i, i)) + + self.assertEqual(batch.routing_key, 'another_fake_key') + + def test_inherit_first_rk2(self): + """ + compound batch inherits the first routing key of the first added statement (prepared statement is first) + """ + + batch = BatchStatement() + + query = """ + INSERT INTO test1rf.test (k, v) VALUES (?, ?) + """ + statement = SimpleStatement(query) + statement._set_routing_key('another_fake_key') + + prepared = self.session.prepare( + """ + INSERT INTO test3rf.test (k, v) VALUES (?, ?) + """) + prepared.routing_key_indexes = {0: {0: 0}, 1: {1: 1}} + + bound = prepared.bind((1, None)) + bound._set_routing_key('fake_key') + + batch.add("INSERT INTO test3rf.test (k, v) VALUES (0, 0)") + batch.add(prepared, (1, 0)) + batch.add(bound) + batch.add(statement) + + self.assertEqual(batch.routing_key, '\x04\x00\x00\x00\x04\x00\x00\x00') From 65b0185958373b98faa63a3aaa02c1689ee4885c Mon Sep 17 00:00:00 2001 From: Luper Rouch Date: Wed, 15 Oct 2014 20:59:13 +0200 Subject: [PATCH 0995/3726] Added SSL support in GeventConnection * Calling ssl.wrap_socket() if ssl_options are given * Removed useless select() code in the read and write greenlets, that was causing weird deadlocks in SSL mode --- cassandra/io/geventreactor.py | 29 ++++++----------------------- 1 file changed, 6 insertions(+), 23 deletions(-) diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index f5096892ed..8c4865af8d 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -12,12 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import gevent -from gevent import select, socket +from gevent import socket, ssl from gevent.event import Event from gevent.queue import Queue from collections import defaultdict -from functools import partial import logging import os @@ -77,6 +76,8 @@ def __init__(self, *args, **kwargs): for (af, socktype, proto, canonname, sockaddr) in addresses: try: self._socket = socket.socket(af, socktype, proto) + if self.ssl_options: + self._socket = ssl.wrap_socket(self._socket, **self.ssl_options) self._socket.settimeout(1.0) self._socket.connect(sockaddr) sockerr = None @@ -90,8 +91,8 @@ def __init__(self, *args, **kwargs): for args in self.sockopts: self._socket.setsockopt(*args) - self._read_watcher = gevent.spawn(lambda: self.handle_read()) - self._write_watcher = gevent.spawn(lambda: self.handle_write()) + self._read_watcher = gevent.spawn(self.handle_read) + self._write_watcher = gevent.spawn(self.handle_write) self._send_options_message() def close(self): @@ -120,17 +121,8 @@ def handle_close(self): self.close() def handle_write(self): - run_select = partial(select.select, (), (self._socket,), ()) while True: - try: - next_msg = self._write_queue.get() - run_select() - except Exception as exc: - if not self.is_closed: - log.debug("Exception during write select() for %s: %s", self, exc) - self.defunct(exc) - return - + next_msg = self._write_queue.get() try: self._socket.sendall(next_msg) except socket.error as err: @@ -139,16 +131,7 @@ def handle_write(self): return # Leave the write loop def handle_read(self): - run_select = partial(select.select, (self._socket,), (), ()) while True: - try: - run_select() - except Exception as exc: - if not self.is_closed: - log.debug("Exception during read select() for %s: %s", self, exc) - self.defunct(exc) - return - try: buf = self._socket.recv(self.in_buffer_size) self._iobuf.write(buf) From def165264cbefd412e9e94769db8c23df14be04e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 15 Oct 2014 14:48:36 -0500 Subject: 
[PATCH 0996/3726] Update ConnectionTests setup name --- tests/integration/standard/test_connection.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 243f1edb55..a0181486dc 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -228,7 +228,7 @@ class AsyncoreConnectionTests(ConnectionTests, unittest.TestCase): def setUp(self): if 'gevent.monkey' in sys.modules: raise unittest.SkipTest("Can't test asyncore with gevent monkey patching") - ConnectionTest.setUp(self) + ConnectionTests.setUp(self) class LibevConnectionTests(ConnectionTests, unittest.TestCase): @@ -241,4 +241,4 @@ def setUp(self): if LibevConnection is None: raise unittest.SkipTest( 'libev does not appear to be installed properly') - ConnectionTest.setUp(self) + ConnectionTests.setUp(self) From 85854f23a92da61b6d16c4b2b9d8052d59f80938 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 15 Oct 2014 16:05:49 -0500 Subject: [PATCH 0997/3726] Test simplification and normalize test names. 
--- tests/integration/standard/test_query.py | 135 ++++++----------------- 1 file changed, 35 insertions(+), 100 deletions(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 76351fa424..a4b90a00b2 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -422,7 +422,8 @@ def test_no_connection_refused_on_timeout(self): # Make sure test passed self.assertTrue(received_timeout) -class BatchStatementAdd_And_RoutingKey(unittest.TestCase): + +class BatchStatementDefaultRoutingKeyTests(unittest.TestCase): # Test for PYTHON-126: BatchStatement.add() should set the routing key of the first added prepared statement def setUp(self): @@ -432,144 +433,78 @@ def setUp(self): % (PROTOCOL_VERSION,)) self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) self.session = self.cluster.connect() + query = """ + INSERT INTO test3rf.test (k, v) VALUES (?, ?) + """ + self.simple_statement = SimpleStatement(query, routing_key='ss_rk') + self.prepared = self.session.prepare(query) def tearDown(self): self.cluster.shutdown() - def test_rk_from_BoundStatement(self): + def test_rk_from_bound(self): """ batch routing key is inherited from BoundStatement """ - - prepared = self.session.prepare( - """ - INSERT INTO test3rf.test (k, v) VALUES (?, ?) - """) - - bound = prepared.bind((1, None)) - batch = BatchStatement() - batch.add(bound) - self.assertEqual(batch.routing_key, bound.routing_key) - - def test_rk_set_inBoundStatement(self): - """ - batch routing key is inherited when set explicitly in BoundStatement - """ - - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - self.session = self.cluster.connect() - - prepared = self.session.prepare( - """ - INSERT INTO test3rf.test (k, v) VALUES (?, ?) 
- """) - bound = prepared.bind((1, None)) - bound._set_routing_key('fake_key') - + bound = self.prepared.bind((1, None)) batch = BatchStatement() batch.add(bound) + self.assertIsNotNone(batch.routing_key) self.assertEqual(batch.routing_key, bound.routing_key) - def test_rk_from_SimpleStatement(self): + def test_rk_from_simple(self): """ batch routing key is inherited from SimpleStatement """ self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) self.session = self.cluster.connect() - - query = """ - INSERT INTO test1rf.test (k, v) VALUES (?, ?) - """ - statement = SimpleStatement(query) - statement._set_routing_key('another_fake_key') batch = BatchStatement() - batch.add(statement) - self.assertEqual(batch.routing_key, statement.routing_key) + batch.add(self.simple_statement) + self.assertIsNotNone(batch.routing_key) + self.assertEqual(batch.routing_key, self.simple_statement.routing_key) - def test_inherit_first_rk(self): + def test_inherit_first_rk_bound(self): """ compound batch inherits the first routing key of the first added statement (bound statement is first) """ - + bound = self.prepared.bind((100000000, None)) batch = BatchStatement() - - query = """ - INSERT INTO test1rf.test (k, v) VALUES (?, ?) - """ - statement = SimpleStatement(query) - statement._set_routing_key('another_fake_key') - - prepared = self.session.prepare( - """ - INSERT INTO test3rf.test (k, v) VALUES (?, ?) 
- """) - - bound = prepared.bind((1, None)) - bound._set_routing_key('fake_key') - - batch.add("INSERT INTO test3rf.test (k, v) VALUES (0, 0)") + batch.add("ss with no rk") batch.add(bound) - batch.add(statement) + batch.add(self.simple_statement) for i in range(3): - batch.add(prepared, (i, i)) + batch.add(self.prepared, (i, i)) - self.assertEqual(batch.routing_key, 'fake_key') + self.assertIsNotNone(batch.routing_key) + self.assertEqual(batch.routing_key, bound.routing_key) - def test_inherit_first_rk_1(self): + def test_inherit_first_rk_simple_statement(self): """ compound batch inherits the first routing key of the first added statement (Simplestatement is first) """ + bound = self.prepared.bind((1, None)) batch = BatchStatement() - - query = """ - INSERT INTO test1rf.test (k, v) VALUES (?, ?) - """ - statement = SimpleStatement(query) - statement._set_routing_key('another_fake_key') - - prepared = self.session.prepare( - """ - INSERT INTO test3rf.test (k, v) VALUES (?, ?) - """) - - bound = prepared.bind((1, None)) - bound._set_routing_key('fake_key') - - batch.add("INSERT INTO test3rf.test (k, v) VALUES (0, 0)") - batch.add(statement) + batch.add("ss with no rk") + batch.add(self.simple_statement) batch.add(bound) for i in range(10): - batch.add(prepared, (i, i)) + batch.add(self.prepared, (i, i)) - self.assertEqual(batch.routing_key, 'another_fake_key') + self.assertIsNotNone(batch.routing_key) + self.assertEqual(batch.routing_key, self.simple_statement.routing_key) - def test_inherit_first_rk2(self): + def test_inherit_first_rk_prepared_param(self): """ compound batch inherits the first routing key of the first added statement (prepared statement is first) """ - + bound = self.prepared.bind((2, None)) batch = BatchStatement() - - query = """ - INSERT INTO test1rf.test (k, v) VALUES (?, ?) 
- """ - statement = SimpleStatement(query) - statement._set_routing_key('another_fake_key') - - prepared = self.session.prepare( - """ - INSERT INTO test3rf.test (k, v) VALUES (?, ?) - """) - prepared.routing_key_indexes = {0: {0: 0}, 1: {1: 1}} - - bound = prepared.bind((1, None)) - bound._set_routing_key('fake_key') - - batch.add("INSERT INTO test3rf.test (k, v) VALUES (0, 0)") - batch.add(prepared, (1, 0)) + batch.add("ss with no rk") + batch.add(self.prepared, (1, 0)) batch.add(bound) - batch.add(statement) + batch.add(self.simple_statement) - self.assertEqual(batch.routing_key, '\x04\x00\x00\x00\x04\x00\x00\x00') + self.assertIsNotNone(batch.routing_key) + self.assertEqual(batch.routing_key, self.prepared.bind((1, 0)).routing_key) From aa88282fdf62428a4a43664db39bfccf6d179ce6 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Thu, 16 Oct 2014 13:46:37 +0200 Subject: [PATCH 0998/3726] Add third party integration guide. Refs #237 & #238. --- docs/index.rst | 1 + docs/topics/third_party.rst | 67 +++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) create mode 100644 docs/topics/third_party.rst diff --git a/docs/index.rst b/docs/index.rst index c02138cd34..ab40e400a0 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -31,6 +31,7 @@ Contents: topics/manage_schemas topics/external_resources topics/related_projects + topics/third_party topics/development topics/faq diff --git a/docs/topics/third_party.rst b/docs/topics/third_party.rst new file mode 100644 index 0000000000..89f23eccb8 --- /dev/null +++ b/docs/topics/third_party.rst @@ -0,0 +1,67 @@ +======================== +Third party integrations +======================== + + +Celery +------ + +Here's how, in substance, CQLengine can be plugged to `Celery +`_: + +.. 
code-block:: python + + from celery import Celery + from celery.signals import worker_process_init, beat_init + from cqlengine import connection + from cqlengine.connection import ( + cluster as cql_cluster, session as cql_session) + + def cassandra_init(): + """ Initialize a clean Cassandra connection. """ + if cql_cluster is not None: + cql_cluster.shutdown() + if cql_session is not None: + cql_session.shutdown() + connection.setup() + + # Initialize worker context for both standard and periodic tasks. + worker_process_init.connect(cassandra_init) + beat_init.connect(cassandra_init) + + app = Celery() + +For more details, see `issue #237 +`_. + + +uWSGI +----- + +This is the code required for proper connection handling of CQLengine for a +`uWSGI `_-run application: + +.. code-block:: python + + from cqlengine import connection + from cqlengine.connection import ( + cluster as cql_cluster, session as cql_session) + + try: + from uwsgidecorators import postfork + except ImportError: + # We're not in a uWSGI context, no need to hook Cassandra session + # initialization to the postfork event. + pass + else: + @postfork + def cassandra_init(): + """ Initialize a new Cassandra session in the context. + + Ensures that a new session is returned for every new request. 
+ """ + if cql_cluster is not None: + cql_cluster.shutdown() + if cql_session is not None: + cql_session.shutdown() + connection.setup() From 68895a08fc586b4b3fb1c5e6a8ea6808e91996e0 Mon Sep 17 00:00:00 2001 From: Luper Rouch Date: Thu, 16 Oct 2014 14:19:40 +0200 Subject: [PATCH 0999/3726] put back select() calls in gevent read/write loops --- cassandra/io/geventreactor.py | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 8c4865af8d..742f6eaa30 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -12,11 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. import gevent -from gevent import socket, ssl +from gevent import select, socket, ssl from gevent.event import Event from gevent.queue import Queue from collections import defaultdict +from functools import partial import logging import os @@ -121,8 +122,17 @@ def handle_close(self): self.close() def handle_write(self): + run_select = partial(select.select, (), (self._socket,), ()) while True: - next_msg = self._write_queue.get() + try: + next_msg = self._write_queue.get() + run_select() + except Exception as exc: + if not self.is_closed: + log.debug("Exception during write select() for %s: %s", self, exc) + self.defunct(exc) + return + try: self._socket.sendall(next_msg) except socket.error as err: @@ -131,7 +141,20 @@ def handle_write(self): return # Leave the write loop def handle_read(self): + run_select = partial(select.select, (self._socket,), (), ()) while True: + try: + ready_read, _, _ = run_select() + except Exception as exc: + if not self.is_closed: + log.debug("Exception during read select() for %s: %s", self, exc) + self.defunct(exc) + return + + if not ready_read: + print 'foo' + continue + try: buf = self._socket.recv(self.in_buffer_size) self._iobuf.write(buf) From 
401adb6741f0e8c14c089584649eaae6f4b443fa Mon Sep 17 00:00:00 2001 From: Luper Rouch Date: Thu, 16 Oct 2014 15:03:08 +0200 Subject: [PATCH 1000/3726] properly drain pending data on SSL socket in the gevent reactor --- cassandra/io/geventreactor.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 742f6eaa30..ee3b638951 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -79,6 +79,9 @@ def __init__(self, *args, **kwargs): self._socket = socket.socket(af, socktype, proto) if self.ssl_options: self._socket = ssl.wrap_socket(self._socket, **self.ssl_options) + self._ssl_socket = True + else: + self._ssl_socket = False self._socket.settimeout(1.0) self._socket.connect(sockaddr) sockerr = None @@ -158,6 +161,13 @@ def handle_read(self): try: buf = self._socket.recv(self.in_buffer_size) self._iobuf.write(buf) + if self._ssl_socket: + # We need to drain pending data when dealing with a SSL socket + data_left = self._socket.pending() + while data_left: + buf = self._socket.recv(data_left) + self._iobuf.write(buf) + data_left = self._socket.pending() except socket.error as err: if not is_timeout(err): log.debug("Exception during socket recv for %s: %s", self, err) From 70f1886a3e1e13e6d17bc19274d00bd996d4d08e Mon Sep 17 00:00:00 2001 From: Luper Rouch Date: Thu, 16 Oct 2014 15:04:01 +0200 Subject: [PATCH 1001/3726] cleanup --- cassandra/io/geventreactor.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index ee3b638951..07206c71d2 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -147,17 +147,13 @@ def handle_read(self): run_select = partial(select.select, (self._socket,), (), ()) while True: try: - ready_read, _, _ = run_select() + run_select() except Exception as exc: if not self.is_closed: log.debug("Exception during read select() for %s: %s", self, 
exc) self.defunct(exc) return - if not ready_read: - print 'foo' - continue - try: buf = self._socket.recv(self.in_buffer_size) self._iobuf.write(buf) From 6e4dc3afdfbba5253218c7bc9565c6666bebe410 Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Thu, 16 Oct 2014 12:18:56 -0400 Subject: [PATCH 1002/3726] Small fixes and tests added --- cqlengine/connection.py | 9 ++++++++- cqlengine/models.py | 7 ++++--- cqlengine/query.py | 11 +++-------- cqlengine/statements.py | 6 +----- cqlengine/tests/base.py | 12 +++++++----- 5 files changed, 23 insertions(+), 22 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 444400d5b0..8e465e8d1c 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -5,6 +5,7 @@ from collections import namedtuple from cassandra.cluster import Cluster, NoHostAvailable from cassandra.query import SimpleStatement, Statement +from cqlengine.exceptions import TransactionException import six try: @@ -88,7 +89,7 @@ def setup( def execute(query, params=None, consistency_level=None): handle_lazy_connect() - + statement = query if not session: raise CQLEngineException("It is required to setup() cqlengine before executing queries") @@ -111,6 +112,12 @@ def execute(query, params=None, consistency_level=None): params = params or {} result = session.execute(query, params) + if result and result[0].get('[applied]', True) is False: + result[0].pop('[applied]') + expected = ', '.join('{0}={1}'.format(t.field, t.value) for t in statement.transactions) + actual = ', '.join('{0}={1}'.format(f, v) for f, v in result[0].items()) + message = 'Transaction statement failed: Expected: {0} Actual: {1}'.format(expected, actual) + raise TransactionException(message) return result def get_session(): diff --git a/cqlengine/models.py b/cqlengine/models.py index 3598bcad3e..b60521a7b1 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -91,9 +91,10 @@ def transaction_setter(*prepared_transaction, **unprepared_transactions): return 
transaction_setter qs = model.__queryset__(model) - def transaction_setter(transaction): - qs._transaction = transaction - return instance + def transaction_setter(**unprepared_transactions): + transactions = model.objects.transaction(**unprepared_transactions)._transaction + qs._transaction = transactions + return qs return transaction_setter def __call__(self, *args, **kwargs): diff --git a/cqlengine/query.py b/cqlengine/query.py index 406d416161..a4f701a66b 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -413,7 +413,7 @@ def transaction(self, *args, **kwargs): try: column = self.model._get_column(col_name) except KeyError: - if col_name in ['exists', 'not_exists']: + if col_name == 'not_exists': exists = True elif col_name == 'pk__token': if not isinstance(val, Token): @@ -767,7 +767,8 @@ def update(self, **values): return nulled_columns = set() - us = UpdateStatement(self.column_family_name, where=self._where, ttl=self._ttl, timestamp=self._timestamp) + us = UpdateStatement(self.column_family_name, where=self._where, ttl=self._ttl, + timestamp=self._timestamp, transactions=self._transaction) for name, val in values.items(): col_name, col_op = self._parse_filter_arg(name) col = self.model._columns.get(col_name) @@ -836,12 +837,6 @@ def _execute(self, q): return self._batch.add_query(q) else: tmp = execute(q, consistency_level=self._consistency) - if tmp and tmp[0].get('[applied]', True) is False: - tmp[0].pop('[applied]') - expected = ', '.join('{0}={1}'.format(t.field, t.value) for t in q.transactions) - actual = ', '.join('{0}={1}'.format(f, v) for f, v in tmp[0].items()) - message = 'Transaction statement failed: Expected: {0} Actual: {1}'.format(expected, actual) - raise TransactionException(message) return tmp def batch(self, batch_obj): diff --git a/cqlengine/statements.py b/cqlengine/statements.py index e4a2aa5b77..5618fa49b2 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -143,8 +143,6 @@ class 
TransactionClause(BaseClause): """ A single variable transaction statement """ def __unicode__(self): - if self.field == 'exists': - return u'EXISTS' if self.field == 'not_exists': return u'NOT EXISTS' return u'"{}" = %({})s'.format(self.field, self.context_id) @@ -153,9 +151,7 @@ def insert_tuple(self): return self.field, self.context_id def update_context(self, ctx): - if self.field not in ['exists', 'not_exists']: - return super(TransactionClause, self).update_context(ctx) - return ctx + return super(TransactionClause, self).update_context(ctx) class ContainerUpdateClause(AssignmentClause): diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index dea0cc191b..90581e4244 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -2,9 +2,9 @@ import os import sys import six -from cqlengine.connection import get_session +from cqlengine import connection -CASSANDRA_VERSION = int(os.environ['CASSANDRA_VERSION']) +CASSANDRA_VERSION = 20 #int(os.environ['CASSANDRA_VERSION']) class BaseCassEngTestCase(TestCase): @@ -13,9 +13,11 @@ class BaseCassEngTestCase(TestCase): # super(BaseCassEngTestCase, cls).setUpClass() session = None - def setUp(self): - self.session = get_session() - super(BaseCassEngTestCase, self).setUp() + @classmethod + def setUpClass(cls): + connection.setup(['192.168.56.103'], 'test') + cls.session = connection.get_session() + super(BaseCassEngTestCase, cls).setUpClass() def assertHasAttr(self, obj, attr): self.assertTrue(hasattr(obj, attr), From de18d562962741818fa0591e8c08c0342406ba87 Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Thu, 16 Oct 2014 12:19:54 -0400 Subject: [PATCH 1003/3726] Reverted test base to original --- cqlengine/tests/base.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index 90581e4244..dea0cc191b 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -2,9 +2,9 @@ import os import sys import six -from 
cqlengine import connection +from cqlengine.connection import get_session -CASSANDRA_VERSION = 20 #int(os.environ['CASSANDRA_VERSION']) +CASSANDRA_VERSION = int(os.environ['CASSANDRA_VERSION']) class BaseCassEngTestCase(TestCase): @@ -13,11 +13,9 @@ class BaseCassEngTestCase(TestCase): # super(BaseCassEngTestCase, cls).setUpClass() session = None - @classmethod - def setUpClass(cls): - connection.setup(['192.168.56.103'], 'test') - cls.session = connection.get_session() - super(BaseCassEngTestCase, cls).setUpClass() + def setUp(self): + self.session = get_session() + super(BaseCassEngTestCase, self).setUp() def assertHasAttr(self, obj, attr): self.assertTrue(hasattr(obj, attr), From 281fb84a1551763ad68c7b11778e7bcdc67a6862 Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Thu, 16 Oct 2014 13:32:02 -0400 Subject: [PATCH 1004/3726] Added tests for real. --- .gitignore | 3 - .../statements/test_transaction_statement.py | 30 +++ cqlengine/tests/test_ifnotexists.py | 178 ++++++++++++++++++ cqlengine/tests/test_transaction.py | 78 ++++++++ 4 files changed, 286 insertions(+), 3 deletions(-) create mode 100644 cqlengine/tests/statements/test_transaction_statement.py create mode 100644 cqlengine/tests/test_ifnotexists.py create mode 100644 cqlengine/tests/test_transaction.py diff --git a/.gitignore b/.gitignore index f576678427..b43bb3d158 100644 --- a/.gitignore +++ b/.gitignore @@ -45,6 +45,3 @@ docs/_build #iPython *.ipynb - -cqlengine/tests/* -cqlengine/tests* \ No newline at end of file diff --git a/cqlengine/tests/statements/test_transaction_statement.py b/cqlengine/tests/statements/test_transaction_statement.py new file mode 100644 index 0000000000..4ead43a3b4 --- /dev/null +++ b/cqlengine/tests/statements/test_transaction_statement.py @@ -0,0 +1,30 @@ +__author__ = 'Tim Martin' +from unittest import TestCase +from cqlengine.statements import TransactionClause +import six + +class TestTransactionClause(TestCase): + + def test_not_exists_clause(self): + tc = 
TransactionClause('not_exists', True) + + self.assertEqual('NOT EXISTS', six.text_type(tc)) + self.assertEqual('NOT EXISTS', str(tc)) + + def test_normal_transaction(self): + tc = TransactionClause('some_value', 23) + tc.set_context_id(3) + + self.assertEqual('"some_value" = %(3)s', six.text_type(tc)) + self.assertEqual('"some_value" = %(3)s', str(tc)) + + def test_equality(self): + tc1 = TransactionClause('some_value', 5) + tc2 = TransactionClause('some_value', 5) + + assert tc1 == tc2 + + tc3 = TransactionClause('not_exists', True) + tc4 = TransactionClause('not_exists', True) + + assert tc3 == tc4 \ No newline at end of file diff --git a/cqlengine/tests/test_ifnotexists.py b/cqlengine/tests/test_ifnotexists.py new file mode 100644 index 0000000000..202a1c03e9 --- /dev/null +++ b/cqlengine/tests/test_ifnotexists.py @@ -0,0 +1,178 @@ +from unittest import skipUnless +from cqlengine.management import sync_table, drop_table, create_keyspace, delete_keyspace +from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.models import Model +from cqlengine.exceptions import LWTException +from cqlengine import columns, BatchQuery +from uuid import uuid4 +import mock +from cqlengine.connection import get_cluster + +cluster = get_cluster() + + +class TestIfNotExistsModel(Model): + + __keyspace__ = 'cqlengine_test_lwt' + + id = columns.UUID(primary_key=True, default=lambda:uuid4()) + count = columns.Integer() + text = columns.Text(required=False) + + +class BaseIfNotExistsTest(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(BaseIfNotExistsTest, cls).setUpClass() + """ + when receiving an insert statement with 'if not exist', cassandra would + perform a read with QUORUM level. Unittest would be failed if replica_factor + is 3 and one node only. Therefore I have create a new keyspace with + replica_factor:1. 
+ """ + create_keyspace(TestIfNotExistsModel.__keyspace__, replication_factor=1) + sync_table(TestIfNotExistsModel) + + @classmethod + def tearDownClass(cls): + super(BaseCassEngTestCase, cls).tearDownClass() + drop_table(TestIfNotExistsModel) + delete_keyspace(TestIfNotExistsModel.__keyspace__) + + +class IfNotExistsInsertTests(BaseIfNotExistsTest): + + @skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") + def test_insert_if_not_exists_success(self): + """ tests that insertion with if_not_exists work as expected """ + + id = uuid4() + + TestIfNotExistsModel.create(id=id, count=8, text='123456789') + self.assertRaises( + LWTException, + TestIfNotExistsModel.if_not_exists().create, id=id, count=9, text='111111111111' + ) + + q = TestIfNotExistsModel.objects(id=id) + self.assertEqual(len(q), 1) + + tm = q.first() + self.assertEquals(tm.count, 8) + self.assertEquals(tm.text, '123456789') + + def test_insert_if_not_exists_failure(self): + """ tests that insertion with if_not_exists failure """ + + id = uuid4() + + TestIfNotExistsModel.create(id=id, count=8, text='123456789') + TestIfNotExistsModel.create(id=id, count=9, text='111111111111') + + q = TestIfNotExistsModel.objects(id=id) + self.assertEquals(len(q), 1) + + tm = q.first() + self.assertEquals(tm.count, 9) + self.assertEquals(tm.text, '111111111111') + + @skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") + def test_batch_insert_if_not_exists_success(self): + """ tests that batch insertion with if_not_exists work as expected """ + + id = uuid4() + + with BatchQuery() as b: + TestIfNotExistsModel.batch(b).if_not_exists().create(id=id, count=8, text='123456789') + + b = BatchQuery() + TestIfNotExistsModel.batch(b).if_not_exists().create(id=id, count=9, text='111111111111') + self.assertRaises(LWTException, b.execute) + + q = TestIfNotExistsModel.objects(id=id) + self.assertEqual(len(q), 1) + + tm = q.first() + self.assertEquals(tm.count, 8) + 
self.assertEquals(tm.text, '123456789') + + def test_batch_insert_if_not_exists_failure(self): + """ tests that batch insertion with if_not_exists failure """ + id = uuid4() + + with BatchQuery() as b: + TestIfNotExistsModel.batch(b).create(id=id, count=8, text='123456789') + with BatchQuery() as b: + TestIfNotExistsModel.batch(b).create(id=id, count=9, text='111111111111') + + q = TestIfNotExistsModel.objects(id=id) + self.assertEquals(len(q), 1) + + tm = q.first() + self.assertEquals(tm.count, 9) + self.assertEquals(tm.text, '111111111111') + + +class IfNotExistsModelTest(BaseIfNotExistsTest): + + def test_if_not_exists_included_on_create(self): + """ tests that if_not_exists on models works as expected """ + + with mock.patch.object(self.session, 'execute') as m: + TestIfNotExistsModel.if_not_exists().create(count=8) + + query = m.call_args[0][0].query_string + self.assertIn("IF NOT EXISTS", query) + + def test_if_not_exists_included_on_save(self): + """ tests if we correctly put 'IF NOT EXISTS' for insert statement """ + + with mock.patch.object(self.session, 'execute') as m: + tm = TestIfNotExistsModel(count=8) + tm.if_not_exists(True).save() + + query = m.call_args[0][0].query_string + self.assertIn("IF NOT EXISTS", query) + + def test_queryset_is_returned_on_class(self): + """ ensure we get a queryset description back """ + qs = TestIfNotExistsModel.if_not_exists() + self.assertTrue(isinstance(qs, TestIfNotExistsModel.__queryset__), type(qs)) + + def test_batch_if_not_exists(self): + """ ensure 'IF NOT EXISTS' exists in statement when in batch """ + with mock.patch.object(self.session, 'execute') as m: + with BatchQuery() as b: + TestIfNotExistsModel.batch(b).if_not_exists().create(count=8) + + self.assertIn("IF NOT EXISTS", m.call_args[0][0].query_string) + + +class IfNotExistsInstanceTest(BaseIfNotExistsTest): + + def test_instance_is_returned(self): + """ + ensures that we properly handle the instance.if_not_exists(True).save() + scenario + """ + o = 
TestIfNotExistsModel.create(text="whatever") + o.text = "new stuff" + o = o.if_not_exists(True) + self.assertEqual(True, o._if_not_exists) + + def test_if_not_exists_is_not_include_with_query_on_update(self): + """ + make sure we don't put 'IF NOT EXIST' in update statements + """ + o = TestIfNotExistsModel.create(text="whatever") + o.text = "new stuff" + o = o.if_not_exists(True) + + with mock.patch.object(self.session, 'execute') as m: + o.save() + + query = m.call_args[0][0].query_string + self.assertNotIn("IF NOT EXIST", query) + + diff --git a/cqlengine/tests/test_transaction.py b/cqlengine/tests/test_transaction.py new file mode 100644 index 0000000000..9dbe963252 --- /dev/null +++ b/cqlengine/tests/test_transaction.py @@ -0,0 +1,78 @@ +__author__ = 'Tim Martin' +from cqlengine.management import sync_table, drop_table +from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.models import Model +from cqlengine.exceptions import TransactionException +from uuid import uuid4 +from cqlengine import columns +import mock +from cqlengine import ALL, BatchQuery + + +class TestTransactionModel(Model): + __keyspace__ = 'test' + id = columns.UUID(primary_key=True, default=lambda:uuid4()) + count = columns.Integer() + text = columns.Text(required=False) + + +class TestTransaction(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestTransaction, cls).setUpClass() + sync_table(TestTransactionModel) + + @classmethod + def tearDownClass(cls): + super(TestTransaction, cls).tearDownClass() + drop_table(TestTransactionModel) + + def test_create_uses_transaction(self): + qs = TestTransactionModel.transaction(not_exists=True) + with mock.patch.object(self.session, 'execute') as m: + qs.create(text='blah blah', count=2) + args = m.call_args + self.assertIn('IF NOT EXISTS', args[0][0].query_string) + + def test_queryset_returned_on_create(self): + qs = TestTransactionModel.transaction(not_exists=True) + self.assertTrue(isinstance(qs, 
TestTransactionModel.__queryset__), type(qs)) + + def test_update_using_transaction(self): + t = TestTransactionModel.create(text='blah blah') + t.text = 'new blah' + with mock.patch.object(self.session, 'execute') as m: + t.transaction(text='blah blah').save() + + args = m.call_args + self.assertIn('IF "text" = %(0)s', args[0][0].query_string) + + def test_update_failure(self): + t = TestTransactionModel.create(text='blah blah') + t.text = 'new blah' + t = t.transaction(text='something wrong') + self.assertRaises(TransactionException, t.save) + + def test_creation_failure(self): + t = TestTransactionModel.create(text='blah blah') + t_clone = TestTransactionModel.transaction(not_exists=True) + self.assertRaises(TransactionException, t_clone.create, id=t.id, count=t.count, text=t.text) + + def test_blind_update(self): + t = TestTransactionModel.create(text='blah blah') + t.text = 'something else' + uid = t.id + + with mock.patch.object(self.session, 'execute') as m: + TestTransactionModel.objects(id=uid).transaction(text='blah blah').update(text='oh hey der') + + args = m.call_args + self.assertIn('IF "text" = %(1)s', args[0][0].query_string) + + def test_blind_update_fail(self): + t = TestTransactionModel.create(text='blah blah') + t.text = 'something else' + uid = t.id + qs = TestTransactionModel.objects(id=uid).transaction(text='Not dis!') + self.assertRaises(TransactionException, qs.update, text='this will never work') \ No newline at end of file From 57454bd55419cc99bca9870eaf1e1a6bec8061d3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 16 Oct 2014 18:34:41 +0000 Subject: [PATCH 1005/3726] Updates for 2.1.2 release. 
--- CHANGELOG.rst | 3 ++- cassandra/__init__.py | 2 +- debian/changelog | 6 ++++++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e349e82bae..4974cbd0a0 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,6 @@ 2.1.2 ===== -In Progress +October 16, 2014 Features -------- @@ -11,6 +11,7 @@ Features Bug Fixes --------- +* Make execute_concurrent compatible with Python 2.6 (PYTHON-159) * Handle Unauthorized message on schema_triggers query (PYTHON-155) * Make execute_concurrent compatible with Python 2.6 (github-197) * Pure Python sorted set in support of UDTs nested in collections (PYTON-167) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index bcabdb195a..d6f3d04f98 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (2, 1, 1, 'post') +__version_info__ = (2, 1, 2) __version__ = '.'.join(map(str, __version_info__)) diff --git a/debian/changelog b/debian/changelog index 2be28d7dbf..eb73ff54e0 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +python-cassandra-driver (2.1.2) unstable; urgency=low + + * Release 2.1.2 + + -- Adam Holmberg Thu, 16 Oct 2014 18:30:22 +0000 + python-cassandra-driver (2.1.1) unstable; urgency=low * Release 2.1.1 From ebf5bf87165d95c7157bd2372f5226302ac2af77 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 16 Oct 2014 18:45:12 +0000 Subject: [PATCH 1006/3726] Post-release version update --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index d6f3d04f98..5be259b91e 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (2, 1, 2) +__version_info__ = (2, 1, 2, 'post') __version__ = '.'.join(map(str, 
__version_info__)) From ddf9c789818f1e915d5cb709c7dd0f52722706fd Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 17 Oct 2014 14:11:49 +0000 Subject: [PATCH 1007/3726] Qualify except branch around namedtuple construction. --- cassandra/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/query.py b/cassandra/query.py index 85c9d0156e..11161afe95 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -102,7 +102,7 @@ def named_tuple_factory(colnames, rows): clean_column_names = map(_clean_column_name, colnames) try: Row = namedtuple('Row', clean_column_names) - except: + except Exception: log.warn("Failed creating named tuple for results with column names %s (cleaned: %s) (see Python 'namedtuple' documentation for details on name rules). " "Results will be returned with positional names. " "Avoid this by choosing different names, using SELECT \"\" AS aliases, " From 09427f4d75cb1c87409f9d7768354bd9a56a72a6 Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Wed, 8 Oct 2014 12:24:47 -0400 Subject: [PATCH 1008/3726] =?UTF-8?q?Initial=20attempt=E2=80=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- cqlengine/models.py | 28 ++++++++++- cqlengine/query.py | 49 ++++++++++++++++++- cqlengine/statements.py | 44 ++++++++++++++++- .../model/test_transaction_statements.py | 36 ++++++++++++++ 4 files changed, 154 insertions(+), 3 deletions(-) create mode 100644 cqlengine/tests/model/test_transaction_statements.py diff --git a/cqlengine/models.py b/cqlengine/models.py index 7781242685..880f6207b5 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -74,6 +74,27 @@ def __call__(self, *args, **kwargs): raise NotImplementedError +class TransactionDescriptor(object): + """ + returns a query set descriptor + """ + def __get__(self, instance, model): + if instance: + def transaction_setter(transaction): + instance._transaction = transaction + return instance + return transaction_setter 
+ qs = model.__queryset__(model) + + def transaction_setter(transaction): + qs._transaction = transaction + return instance + return transaction_setter + + def __call__(self, *args, **kwargs): + raise NotImplementedError + + class TTLDescriptor(object): """ returns a query set descriptor @@ -239,6 +260,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass objects = QuerySetDescriptor() ttl = TTLDescriptor() consistency = ConsistencyDescriptor() + transaction = TransactionDescriptor() # custom timestamps, see USING TIMESTAMP X timestamp = TimestampDescriptor() @@ -303,7 +325,7 @@ def __init__(self, **values): self._timestamp = None for name, column in self._columns.items(): - value = values.get(name, None) + value = values.get(name, None) if value is not None or isinstance(column, columns.BaseContainerColumn): value = column.to_python(value) value_mngr = column.value_manager(self, column, value) @@ -531,6 +553,10 @@ def filter(cls, *args, **kwargs): return cls.objects.filter(*args, **kwargs) + @classmethod + def transaction(cls, *args, **kwargs): + return cls.objects.transaction(*args, **kwargs) + @classmethod def get(cls, *args, **kwargs): return cls.objects.get(*args, **kwargs) diff --git a/cqlengine/query.py b/cqlengine/query.py index fd2f0072cf..8ee7e62135 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -13,7 +13,7 @@ #http://www.datastax.com/docs/1.1/references/cql/index from cqlengine.operators import InOperator, EqualsOperator, GreaterThanOperator, GreaterThanOrEqualOperator from cqlengine.operators import LessThanOperator, LessThanOrEqualOperator, BaseWhereOperator -from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause, InsertStatement, BaseCQLStatement, MapUpdateClause, MapDeleteClause, ListUpdateClause, SetUpdateClause, CounterUpdateClause +from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause, InsertStatement, 
BaseCQLStatement, MapUpdateClause, MapDeleteClause, ListUpdateClause, SetUpdateClause, CounterUpdateClause, TransactionClause class QueryException(CQLEngineException): pass @@ -206,6 +206,9 @@ def __init__(self, model): #Where clause filters self._where = [] + # Transaction clause filters + self._transaction = [] + #ordering arguments self._order = [] @@ -407,6 +410,50 @@ def _parse_filter_arg(self, arg): else: raise QueryException("Can't parse '{}'".format(arg)) + def transaction(self, *args, **kwargs): + """Adds IF statements to queryset""" + if len([x for x in kwargs.values() if x is None]): + raise CQLEngineException("None values on transaction are not allowed") + + clone = copy.deepcopy(self) + for operator in args: + if not isinstance(operator, TransactionClause): + raise QueryException('{} is not a valid query operator'.format(operator)) + clone._transaction.append(operator) + + for col_name, val in kwargs.items(): + try: + column = self.model._get_column(col_name) + except KeyError: + if col_name in ['exists', 'not_exists']: + pass + elif col_name == 'pk__token': + if not isinstance(val, Token): + raise QueryException("Virtual column 'pk__token' may only be compared to Token() values") + column = columns._PartitionKeysToken(self.model) + quote_field = False + else: + raise QueryException("Can't resolve column name: '{}'".format(col_name)) + + if isinstance(val, Token): + if col_name != 'pk__token': + raise QueryException("Token() values may only be compared to the 'pk__token' virtual column") + partition_columns = column.partition_columns + if len(partition_columns) != len(val.value): + raise QueryException( + 'Token() received {} arguments but model has {} partition keys'.format( + len(val.value), len(partition_columns))) + val.set_columns(partition_columns) + + if isinstance(val, BaseQueryFunction): + query_val = val + else: + query_val = column.to_database(val) + + clone._transaction.append(TransactionClause(col_name, query_val)) + + return clone + def 
filter(self, *args, **kwargs): """ Adds WHERE arguments to the queryset, returning a new queryset diff --git a/cqlengine/statements.py b/cqlengine/statements.py index a6cdf4b3e7..5cdef38af2 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -139,6 +139,20 @@ def insert_tuple(self): return self.field, self.context_id +class TransactionClause(BaseClause): + """ A single variable transaction statement """ + + def __unicode__(self): + if self.field == 'exists': + return u'EXISTS' + if self.field == 'not_exists': + return u'NOT EXISTS' + return u'"{}" = %({})s'.format(self.field, self.context_id) + + def insert_tuple(self): + return self.field, self.context_id + + class ContainerUpdateClause(AssignmentClause): def __init__(self, field, value, operation=None, previous=None, column=None): @@ -573,7 +587,8 @@ def __init__(self, consistency=None, where=None, ttl=None, - timestamp=None): + timestamp=None, + transactions=None): super(AssignmentStatement, self).__init__( table, consistency=consistency, @@ -587,6 +602,11 @@ def __init__(self, for assignment in assignments or []: self.add_assignment_clause(assignment) + # Add transaction statements + self.transactions = [] + for transaction in transactions or []: + self.add_transaction_clause(transaction) + def update_context_id(self, i): super(AssignmentStatement, self).update_context_id(i) for assignment in self.assignments: @@ -605,6 +625,19 @@ def add_assignment_clause(self, clause): self.context_counter += clause.get_context_size() self.assignments.append(clause) + def add_transaction_clause(self, clause): + """ + Adds a transaction clause to this statement + + :param clause: The clause that will be added to the transaction statement + :type clause: TransactionClause + """ + if not isinstance(clause, TransactionClause): + raise StatementException('only instances of AssignmentClause can be added to statements') + clause.set_context_id(self.context_counter) + self.context_counter += 
clause.get_context_size() + self.transactions.append(clause) + @property def is_empty(self): return len(self.assignments) == 0 @@ -615,6 +648,9 @@ def get_context(self): clause.update_context(ctx) return ctx + def _transactions(self): + return 'IF {}'.format(' AND '.join([six.text_type(c) for c in self.where_clauses])) + class InsertStatement(AssignmentStatement): """ an cql insert select statement """ @@ -660,6 +696,9 @@ def __unicode__(self): if self.timestamp: qs += ["USING TIMESTAMP {}".format(self.timestamp_normalized)] + if len(self.transactions) > 0: + qs += [self._transactions] + return ' '.join(qs) @@ -686,6 +725,9 @@ def __unicode__(self): if self.where_clauses: qs += [self._where] + if len(self.transactions) > 0: + qs += [self._transactions] + return ' '.join(qs) diff --git a/cqlengine/tests/model/test_transaction_statements.py b/cqlengine/tests/model/test_transaction_statements.py new file mode 100644 index 0000000000..8afabb1ed2 --- /dev/null +++ b/cqlengine/tests/model/test_transaction_statements.py @@ -0,0 +1,36 @@ +__author__ = 'Tim Martin' +from uuid import uuid4 + +from mock import patch +from cqlengine.exceptions import ValidationError + +from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.models import Model +from cqlengine import columns +from cqlengine.management import sync_table, drop_table + + +class TestUpdateModel(Model): + __keyspace__ = 'test' + partition = columns.UUID(primary_key=True, default=uuid4) + cluster = columns.UUID(primary_key=True, default=uuid4) + count = columns.Integer(required=False) + text = columns.Text(required=False, index=True) + + +class ModelUpdateTests(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(ModelUpdateTests, cls).setUpClass() + sync_table(TestUpdateModel) + + @classmethod + def tearDownClass(cls): + super(ModelUpdateTests, cls).tearDownClass() + drop_table(TestUpdateModel) + + def test_transaction_insertion(self): + m = TestUpdateModel(count=5, 
text='something').transaction(exists=True) + m.save() + x = 10 \ No newline at end of file From 874a63cdbcc658023c3d0f5c22c57b472d00cd68 Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Wed, 8 Oct 2014 14:48:35 -0400 Subject: [PATCH 1009/3726] Bug fixes --- cqlengine/models.py | 20 +++++++++++-------- cqlengine/query.py | 14 ++++++++----- cqlengine/statements.py | 15 ++++++++++---- .../model/test_transaction_statements.py | 17 ++++++++++++---- 4 files changed, 45 insertions(+), 21 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index 880f6207b5..f3114d6c21 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -80,9 +80,14 @@ class TransactionDescriptor(object): """ def __get__(self, instance, model): if instance: - def transaction_setter(transaction): - instance._transaction = transaction + def transaction_setter(*prepared_transaction, **unprepared_transactions): + if len(prepared_transaction) > 0: + transactions = prepared_transaction[0] + else: + transactions = instance.objects.transaction(**unprepared_transactions)._transaction + instance._transaction = transactions return instance + return transaction_setter qs = model.__queryset__(model) @@ -323,6 +328,7 @@ def __init__(self, **values): self._values = {} self._ttl = self.__default_ttl__ self._timestamp = None + self._transaction = None for name, column in self._columns.items(): value = values.get(name, None) @@ -553,10 +559,6 @@ def filter(cls, *args, **kwargs): return cls.objects.filter(*args, **kwargs) - @classmethod - def transaction(cls, *args, **kwargs): - return cls.objects.transaction(*args, **kwargs) - @classmethod def get(cls, *args, **kwargs): return cls.objects.get(*args, **kwargs) @@ -576,7 +578,8 @@ def save(self): ttl=self._ttl, timestamp=self._timestamp, consistency=self.__consistency__, - if_not_exists=self._if_not_exists).save() + if_not_exists=self._if_not_exists, + transaction=self._transaction).save() #reset the value managers for v in self._values.values(): @@ 
-614,7 +617,8 @@ def update(self, **values): batch=self._batch, ttl=self._ttl, timestamp=self._timestamp, - consistency=self.__consistency__).update() + consistency=self.__consistency__, + transaction=self._transaction).update() #reset the value managers for v in self._values.values(): diff --git a/cqlengine/query.py b/cqlengine/query.py index 8ee7e62135..9c715843d1 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -422,11 +422,12 @@ def transaction(self, *args, **kwargs): clone._transaction.append(operator) for col_name, val in kwargs.items(): + exists = False try: column = self.model._get_column(col_name) except KeyError: if col_name in ['exists', 'not_exists']: - pass + exists = True elif col_name == 'pk__token': if not isinstance(val, Token): raise QueryException("Virtual column 'pk__token' may only be compared to Token() values") @@ -445,7 +446,7 @@ def transaction(self, *args, **kwargs): len(val.value), len(partition_columns))) val.set_columns(partition_columns) - if isinstance(val, BaseQueryFunction): + if isinstance(val, BaseQueryFunction) or exists is True: query_val = val else: query_val = column.to_database(val) @@ -834,7 +835,7 @@ class DMLQuery(object): _timestamp = None _if_not_exists = False - def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None, if_not_exists=False): + def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None, if_not_exists=False, transaction=None): self.model = model self.column_family_name = self.model.column_family_name() self.instance = instance @@ -843,6 +844,7 @@ def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, self._consistency = consistency self._timestamp = timestamp self._if_not_exists = if_not_exists + self._transaction = transaction def _execute(self, q): if self._batch: @@ -897,7 +899,8 @@ def update(self): raise CQLEngineException("DML Query intance attribute is None") assert type(self.instance) == 
self.model static_update_only = True - statement = UpdateStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp) + statement = UpdateStatement(self.column_family_name, ttl=self._ttl, + timestamp=self._timestamp, transactions=self._transaction) #get defined fields and their column names for name, col in self.model._columns.items(): if not col.is_primary_key: @@ -960,7 +963,8 @@ def save(self): if self.instance._has_counter or self.instance._can_update(): return self.update() else: - insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, if_not_exists=self._if_not_exists) + insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, if_not_exists=self._if_not_exists, transactions=self._transaction) + for name, col in self.instance._columns.items(): val = getattr(self.instance, name, None) if col._val_is_null(val): diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 5cdef38af2..4df4306128 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -152,6 +152,11 @@ def __unicode__(self): def insert_tuple(self): return self.field, self.context_id + def update_context(self, ctx): + if self.field not in ['exists', 'not_exists']: + return super(TransactionClause, self).update_context(ctx) + return ctx + class ContainerUpdateClause(AssignmentClause): @@ -646,10 +651,12 @@ def get_context(self): ctx = super(AssignmentStatement, self).get_context() for clause in self.assignments: clause.update_context(ctx) + for clause in self.transactions or []: + clause.update_context(ctx) return ctx - def _transactions(self): - return 'IF {}'.format(' AND '.join([six.text_type(c) for c in self.where_clauses])) + def _get_transactions(self): + return 'IF {}'.format(' AND '.join([six.text_type(c) for c in self.transactions])) class InsertStatement(AssignmentStatement): @@ -697,7 +704,7 @@ def __unicode__(self): qs += ["USING TIMESTAMP {}".format(self.timestamp_normalized)] if 
len(self.transactions) > 0: - qs += [self._transactions] + qs += [self._get_transactions()] return ' '.join(qs) @@ -726,7 +733,7 @@ def __unicode__(self): qs += [self._where] if len(self.transactions) > 0: - qs += [self._transactions] + qs += [self._get_transactions()] return ' '.join(qs) diff --git a/cqlengine/tests/model/test_transaction_statements.py b/cqlengine/tests/model/test_transaction_statements.py index 8afabb1ed2..a62ac122d3 100644 --- a/cqlengine/tests/model/test_transaction_statements.py +++ b/cqlengine/tests/model/test_transaction_statements.py @@ -4,10 +4,13 @@ from mock import patch from cqlengine.exceptions import ValidationError -from cqlengine.tests.base import BaseCassEngTestCase +from unittest import TestCase from cqlengine.models import Model from cqlengine import columns from cqlengine.management import sync_table, drop_table +from cqlengine import connection + +connection.setup(['192.168.56.103'], 'test') class TestUpdateModel(Model): @@ -18,7 +21,7 @@ class TestUpdateModel(Model): text = columns.Text(required=False, index=True) -class ModelUpdateTests(BaseCassEngTestCase): +class ModelUpdateTests(TestCase): @classmethod def setUpClass(cls): @@ -31,6 +34,12 @@ def tearDownClass(cls): drop_table(TestUpdateModel) def test_transaction_insertion(self): - m = TestUpdateModel(count=5, text='something').transaction(exists=True) - m.save() + m = TestUpdateModel.objects.create(count=5, text='something') + all_models = TestUpdateModel.objects.all() + for x in all_models: + pass + m = TestUpdateModel.objects + m = m.transaction(not_exists=True) + m = m.create(count=5, text='something') + m.transaction(count=6).update(text='something else') x = 10 \ No newline at end of file From adc49ac5d231899bc1baf981fc51d6ce8ac79ee7 Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Wed, 8 Oct 2014 17:11:19 -0400 Subject: [PATCH 1010/3726] Added a thrown exception when a transaction fails to be applied --- .gitignore | 3 +++ cqlengine/exceptions.py | 1 + 
cqlengine/query.py | 11 ++++++++++- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index b43bb3d158..f576678427 100644 --- a/.gitignore +++ b/.gitignore @@ -45,3 +45,6 @@ docs/_build #iPython *.ipynb + +cqlengine/tests/* +cqlengine/tests* \ No newline at end of file diff --git a/cqlengine/exceptions.py b/cqlengine/exceptions.py index f40f8af700..75473de681 100644 --- a/cqlengine/exceptions.py +++ b/cqlengine/exceptions.py @@ -2,6 +2,7 @@ class CQLEngineException(Exception): pass class ModelException(CQLEngineException): pass class ValidationError(CQLEngineException): pass +class TransactionException(CQLEngineException): pass class UndefinedKeyspaceException(CQLEngineException): pass class LWTException(CQLEngineException): pass diff --git a/cqlengine/query.py b/cqlengine/query.py index 9c715843d1..5f43da0f08 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -6,7 +6,11 @@ from cqlengine.connection import execute +<<<<<<< HEAD from cqlengine.exceptions import CQLEngineException, ValidationError, LWTException +======= +from cqlengine.exceptions import CQLEngineException, ValidationError, TransactionException +>>>>>>> Added a thrown exception when a transaction fails to be applied from cqlengine.functions import Token, BaseQueryFunction, QueryValue, UnicodeMixin #CQL 3 reference: @@ -853,7 +857,12 @@ def _execute(self, q): tmp = execute(q, consistency_level=self._consistency) if self._if_not_exists: check_applied(tmp) - + if self._transaction and tmp[0].get('[applied]', True) is False: + tmp[0].pop('[applied]') + expected = ', '.join('{0}={1}'.format(t.field, t.value) for t in q.transactions) + actual = ', '.join('{0}={1}'.format(f, v) for f, v in tmp[0].items()) + message = 'Transaction statement failed: Expected: {0} Actual: {1}'.format(expected, actual) + raise TransactionException(message) return tmp def batch(self, batch_obj): From f70b59d860f28749be6e6636a9f983bef3f17f5c Mon Sep 17 00:00:00 2001 From: timmartin19 
Date: Wed, 15 Oct 2014 09:31:05 -0400 Subject: [PATCH 1011/3726] Reverted tests to original --- .../model/test_transaction_statements.py | 45 ------------------- 1 file changed, 45 deletions(-) delete mode 100644 cqlengine/tests/model/test_transaction_statements.py diff --git a/cqlengine/tests/model/test_transaction_statements.py b/cqlengine/tests/model/test_transaction_statements.py deleted file mode 100644 index a62ac122d3..0000000000 --- a/cqlengine/tests/model/test_transaction_statements.py +++ /dev/null @@ -1,45 +0,0 @@ -__author__ = 'Tim Martin' -from uuid import uuid4 - -from mock import patch -from cqlengine.exceptions import ValidationError - -from unittest import TestCase -from cqlengine.models import Model -from cqlengine import columns -from cqlengine.management import sync_table, drop_table -from cqlengine import connection - -connection.setup(['192.168.56.103'], 'test') - - -class TestUpdateModel(Model): - __keyspace__ = 'test' - partition = columns.UUID(primary_key=True, default=uuid4) - cluster = columns.UUID(primary_key=True, default=uuid4) - count = columns.Integer(required=False) - text = columns.Text(required=False, index=True) - - -class ModelUpdateTests(TestCase): - - @classmethod - def setUpClass(cls): - super(ModelUpdateTests, cls).setUpClass() - sync_table(TestUpdateModel) - - @classmethod - def tearDownClass(cls): - super(ModelUpdateTests, cls).tearDownClass() - drop_table(TestUpdateModel) - - def test_transaction_insertion(self): - m = TestUpdateModel.objects.create(count=5, text='something') - all_models = TestUpdateModel.objects.all() - for x in all_models: - pass - m = TestUpdateModel.objects - m = m.transaction(not_exists=True) - m = m.create(count=5, text='something') - m.transaction(count=6).update(text='something else') - x = 10 \ No newline at end of file From 8e7fe5518bc78adde6af539bb711f4f8fe41be6b Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Thu, 16 Oct 2014 12:18:56 -0400 Subject: [PATCH 1012/3726] Small fixes and tests 
added --- cqlengine/connection.py | 9 ++++++++- cqlengine/models.py | 7 ++++--- cqlengine/query.py | 15 +++++++-------- cqlengine/statements.py | 6 +----- cqlengine/tests/base.py | 12 +++++++----- 5 files changed, 27 insertions(+), 22 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 444400d5b0..8e465e8d1c 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -5,6 +5,7 @@ from collections import namedtuple from cassandra.cluster import Cluster, NoHostAvailable from cassandra.query import SimpleStatement, Statement +from cqlengine.exceptions import TransactionException import six try: @@ -88,7 +89,7 @@ def setup( def execute(query, params=None, consistency_level=None): handle_lazy_connect() - + statement = query if not session: raise CQLEngineException("It is required to setup() cqlengine before executing queries") @@ -111,6 +112,12 @@ def execute(query, params=None, consistency_level=None): params = params or {} result = session.execute(query, params) + if result and result[0].get('[applied]', True) is False: + result[0].pop('[applied]') + expected = ', '.join('{0}={1}'.format(t.field, t.value) for t in statement.transactions) + actual = ', '.join('{0}={1}'.format(f, v) for f, v in result[0].items()) + message = 'Transaction statement failed: Expected: {0} Actual: {1}'.format(expected, actual) + raise TransactionException(message) return result def get_session(): diff --git a/cqlengine/models.py b/cqlengine/models.py index f3114d6c21..d8e0b778f0 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -91,9 +91,10 @@ def transaction_setter(*prepared_transaction, **unprepared_transactions): return transaction_setter qs = model.__queryset__(model) - def transaction_setter(transaction): - qs._transaction = transaction - return instance + def transaction_setter(**unprepared_transactions): + transactions = model.objects.transaction(**unprepared_transactions)._transaction + qs._transaction = transactions + return qs 
return transaction_setter def __call__(self, *args, **kwargs): diff --git a/cqlengine/query.py b/cqlengine/query.py index 5f43da0f08..384bcf7168 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -5,12 +5,7 @@ from cqlengine.columns import Counter, List, Set from cqlengine.connection import execute - -<<<<<<< HEAD -from cqlengine.exceptions import CQLEngineException, ValidationError, LWTException -======= -from cqlengine.exceptions import CQLEngineException, ValidationError, TransactionException ->>>>>>> Added a thrown exception when a transaction fails to be applied +from cqlengine.exceptions import CQLEngineException, ValidationError, TransactionException, LWTException from cqlengine.functions import Token, BaseQueryFunction, QueryValue, UnicodeMixin #CQL 3 reference: @@ -430,7 +425,7 @@ def transaction(self, *args, **kwargs): try: column = self.model._get_column(col_name) except KeyError: - if col_name in ['exists', 'not_exists']: + if col_name == 'not_exists': exists = True elif col_name == 'pk__token': if not isinstance(val, Token): @@ -784,7 +779,8 @@ def update(self, **values): return nulled_columns = set() - us = UpdateStatement(self.column_family_name, where=self._where, ttl=self._ttl, timestamp=self._timestamp) + us = UpdateStatement(self.column_family_name, where=self._where, ttl=self._ttl, + timestamp=self._timestamp, transactions=self._transaction) for name, val in values.items(): col_name, col_op = self._parse_filter_arg(name) col = self.model._columns.get(col_name) @@ -855,6 +851,7 @@ def _execute(self, q): return self._batch.add_query(q) else: tmp = execute(q, consistency_level=self._consistency) +<<<<<<< HEAD if self._if_not_exists: check_applied(tmp) if self._transaction and tmp[0].get('[applied]', True) is False: @@ -863,6 +860,8 @@ def _execute(self, q): actual = ', '.join('{0}={1}'.format(f, v) for f, v in tmp[0].items()) message = 'Transaction statement failed: Expected: {0} Actual: {1}'.format(expected, actual) raise 
TransactionException(message) +======= +>>>>>>> Small fixes and tests added return tmp def batch(self, batch_obj): diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 4df4306128..7fb24566ee 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -143,8 +143,6 @@ class TransactionClause(BaseClause): """ A single variable transaction statement """ def __unicode__(self): - if self.field == 'exists': - return u'EXISTS' if self.field == 'not_exists': return u'NOT EXISTS' return u'"{}" = %({})s'.format(self.field, self.context_id) @@ -153,9 +151,7 @@ def insert_tuple(self): return self.field, self.context_id def update_context(self, ctx): - if self.field not in ['exists', 'not_exists']: - return super(TransactionClause, self).update_context(ctx) - return ctx + return super(TransactionClause, self).update_context(ctx) class ContainerUpdateClause(AssignmentClause): diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index dea0cc191b..90581e4244 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -2,9 +2,9 @@ import os import sys import six -from cqlengine.connection import get_session +from cqlengine import connection -CASSANDRA_VERSION = int(os.environ['CASSANDRA_VERSION']) +CASSANDRA_VERSION = 20 #int(os.environ['CASSANDRA_VERSION']) class BaseCassEngTestCase(TestCase): @@ -13,9 +13,11 @@ class BaseCassEngTestCase(TestCase): # super(BaseCassEngTestCase, cls).setUpClass() session = None - def setUp(self): - self.session = get_session() - super(BaseCassEngTestCase, self).setUp() + @classmethod + def setUpClass(cls): + connection.setup(['192.168.56.103'], 'test') + cls.session = connection.get_session() + super(BaseCassEngTestCase, cls).setUpClass() def assertHasAttr(self, obj, attr): self.assertTrue(hasattr(obj, attr), From 043982eb42af925cb9677ea8ee26931e5ecaa6b0 Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Thu, 16 Oct 2014 12:19:54 -0400 Subject: [PATCH 1013/3726] Reverted test base to original --- 
cqlengine/tests/base.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index 90581e4244..dea0cc191b 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -2,9 +2,9 @@ import os import sys import six -from cqlengine import connection +from cqlengine.connection import get_session -CASSANDRA_VERSION = 20 #int(os.environ['CASSANDRA_VERSION']) +CASSANDRA_VERSION = int(os.environ['CASSANDRA_VERSION']) class BaseCassEngTestCase(TestCase): @@ -13,11 +13,9 @@ class BaseCassEngTestCase(TestCase): # super(BaseCassEngTestCase, cls).setUpClass() session = None - @classmethod - def setUpClass(cls): - connection.setup(['192.168.56.103'], 'test') - cls.session = connection.get_session() - super(BaseCassEngTestCase, cls).setUpClass() + def setUp(self): + self.session = get_session() + super(BaseCassEngTestCase, self).setUp() def assertHasAttr(self, obj, attr): self.assertTrue(hasattr(obj, attr), From 1d71f8c9d2223d59b8399abbf92c0b4b759e3784 Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Thu, 16 Oct 2014 13:32:02 -0400 Subject: [PATCH 1014/3726] Added tests for real. 
--- .gitignore | 3 - .../statements/test_transaction_statement.py | 30 +++++++ cqlengine/tests/test_transaction.py | 78 +++++++++++++++++++ 3 files changed, 108 insertions(+), 3 deletions(-) create mode 100644 cqlengine/tests/statements/test_transaction_statement.py create mode 100644 cqlengine/tests/test_transaction.py diff --git a/.gitignore b/.gitignore index f576678427..b43bb3d158 100644 --- a/.gitignore +++ b/.gitignore @@ -45,6 +45,3 @@ docs/_build #iPython *.ipynb - -cqlengine/tests/* -cqlengine/tests* \ No newline at end of file diff --git a/cqlengine/tests/statements/test_transaction_statement.py b/cqlengine/tests/statements/test_transaction_statement.py new file mode 100644 index 0000000000..4ead43a3b4 --- /dev/null +++ b/cqlengine/tests/statements/test_transaction_statement.py @@ -0,0 +1,30 @@ +__author__ = 'Tim Martin' +from unittest import TestCase +from cqlengine.statements import TransactionClause +import six + +class TestTransactionClause(TestCase): + + def test_not_exists_clause(self): + tc = TransactionClause('not_exists', True) + + self.assertEqual('NOT EXISTS', six.text_type(tc)) + self.assertEqual('NOT EXISTS', str(tc)) + + def test_normal_transaction(self): + tc = TransactionClause('some_value', 23) + tc.set_context_id(3) + + self.assertEqual('"some_value" = %(3)s', six.text_type(tc)) + self.assertEqual('"some_value" = %(3)s', str(tc)) + + def test_equality(self): + tc1 = TransactionClause('some_value', 5) + tc2 = TransactionClause('some_value', 5) + + assert tc1 == tc2 + + tc3 = TransactionClause('not_exists', True) + tc4 = TransactionClause('not_exists', True) + + assert tc3 == tc4 \ No newline at end of file diff --git a/cqlengine/tests/test_transaction.py b/cqlengine/tests/test_transaction.py new file mode 100644 index 0000000000..9dbe963252 --- /dev/null +++ b/cqlengine/tests/test_transaction.py @@ -0,0 +1,78 @@ +__author__ = 'Tim Martin' +from cqlengine.management import sync_table, drop_table +from cqlengine.tests.base import 
BaseCassEngTestCase +from cqlengine.models import Model +from cqlengine.exceptions import TransactionException +from uuid import uuid4 +from cqlengine import columns +import mock +from cqlengine import ALL, BatchQuery + + +class TestTransactionModel(Model): + __keyspace__ = 'test' + id = columns.UUID(primary_key=True, default=lambda:uuid4()) + count = columns.Integer() + text = columns.Text(required=False) + + +class TestTransaction(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestTransaction, cls).setUpClass() + sync_table(TestTransactionModel) + + @classmethod + def tearDownClass(cls): + super(TestTransaction, cls).tearDownClass() + drop_table(TestTransactionModel) + + def test_create_uses_transaction(self): + qs = TestTransactionModel.transaction(not_exists=True) + with mock.patch.object(self.session, 'execute') as m: + qs.create(text='blah blah', count=2) + args = m.call_args + self.assertIn('IF NOT EXISTS', args[0][0].query_string) + + def test_queryset_returned_on_create(self): + qs = TestTransactionModel.transaction(not_exists=True) + self.assertTrue(isinstance(qs, TestTransactionModel.__queryset__), type(qs)) + + def test_update_using_transaction(self): + t = TestTransactionModel.create(text='blah blah') + t.text = 'new blah' + with mock.patch.object(self.session, 'execute') as m: + t.transaction(text='blah blah').save() + + args = m.call_args + self.assertIn('IF "text" = %(0)s', args[0][0].query_string) + + def test_update_failure(self): + t = TestTransactionModel.create(text='blah blah') + t.text = 'new blah' + t = t.transaction(text='something wrong') + self.assertRaises(TransactionException, t.save) + + def test_creation_failure(self): + t = TestTransactionModel.create(text='blah blah') + t_clone = TestTransactionModel.transaction(not_exists=True) + self.assertRaises(TransactionException, t_clone.create, id=t.id, count=t.count, text=t.text) + + def test_blind_update(self): + t = TestTransactionModel.create(text='blah blah') + 
t.text = 'something else' + uid = t.id + + with mock.patch.object(self.session, 'execute') as m: + TestTransactionModel.objects(id=uid).transaction(text='blah blah').update(text='oh hey der') + + args = m.call_args + self.assertIn('IF "text" = %(1)s', args[0][0].query_string) + + def test_blind_update_fail(self): + t = TestTransactionModel.create(text='blah blah') + t.text = 'something else' + uid = t.id + qs = TestTransactionModel.objects(id=uid).transaction(text='Not dis!') + self.assertRaises(TransactionException, qs.update, text='this will never work') \ No newline at end of file From 7f733f09d616d928cafd74e719b991192223e0ab Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Mon, 20 Oct 2014 10:24:28 -0400 Subject: [PATCH 1015/3726] Removed transaction from insert statement --- cqlengine/statements.py | 73 ++++++++++++++++++++++++----------------- 1 file changed, 43 insertions(+), 30 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 7fb24566ee..30ac809a9b 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -143,8 +143,6 @@ class TransactionClause(BaseClause): """ A single variable transaction statement """ def __unicode__(self): - if self.field == 'not_exists': - return u'NOT EXISTS' return u'"{}" = %({})s'.format(self.field, self.context_id) def insert_tuple(self): @@ -588,8 +586,7 @@ def __init__(self, consistency=None, where=None, ttl=None, - timestamp=None, - transactions=None): + timestamp=None): super(AssignmentStatement, self).__init__( table, consistency=consistency, @@ -603,11 +600,6 @@ def __init__(self, for assignment in assignments or []: self.add_assignment_clause(assignment) - # Add transaction statements - self.transactions = [] - for transaction in transactions or []: - self.add_transaction_clause(transaction) - def update_context_id(self, i): super(AssignmentStatement, self).update_context_id(i) for assignment in self.assignments: @@ -626,19 +618,6 @@ def add_assignment_clause(self, clause): 
self.context_counter += clause.get_context_size() self.assignments.append(clause) - def add_transaction_clause(self, clause): - """ - Adds a transaction clause to this statement - - :param clause: The clause that will be added to the transaction statement - :type clause: TransactionClause - """ - if not isinstance(clause, TransactionClause): - raise StatementException('only instances of AssignmentClause can be added to statements') - clause.set_context_id(self.context_counter) - self.context_counter += clause.get_context_size() - self.transactions.append(clause) - @property def is_empty(self): return len(self.assignments) == 0 @@ -647,13 +626,8 @@ def get_context(self): ctx = super(AssignmentStatement, self).get_context() for clause in self.assignments: clause.update_context(ctx) - for clause in self.transactions or []: - clause.update_context(ctx) return ctx - def _get_transactions(self): - return 'IF {}'.format(' AND '.join([six.text_type(c) for c in self.transactions])) - class InsertStatement(AssignmentStatement): """ an cql insert select statement """ @@ -699,15 +673,32 @@ def __unicode__(self): if self.timestamp: qs += ["USING TIMESTAMP {}".format(self.timestamp_normalized)] - if len(self.transactions) > 0: - qs += [self._get_transactions()] - return ' '.join(qs) class UpdateStatement(AssignmentStatement): """ an cql update select statement """ + def __init__(self, + table, + assignments=None, + consistency=None, + where=None, + ttl=None, + timestamp=None, + transactions=None): + super(UpdateStatement, self). 
__init__(table, + assignments=assignments, + consistency=consistency, + where=where, + ttl=ttl, + timestamp=timestamp) + + # Add transaction statements + self.transactions = [] + for transaction in transactions or []: + self.add_transaction_clause(transaction) + def __unicode__(self): qs = ['UPDATE', self.table] @@ -733,6 +724,28 @@ def __unicode__(self): return ' '.join(qs) + def add_transaction_clause(self, clause): + """ + Adds a transaction clause to this statement + + :param clause: The clause that will be added to the transaction statement + :type clause: TransactionClause + """ + if not isinstance(clause, TransactionClause): + raise StatementException('only instances of AssignmentClause can be added to statements') + clause.set_context_id(self.context_counter) + self.context_counter += clause.get_context_size() + self.transactions.append(clause) + + def get_context(self): + ctx = super(UpdateStatement, self).get_context() + for clause in self.transactions or []: + clause.update_context(ctx) + return ctx + + def _get_transactions(self): + return 'IF {}'.format(' AND '.join([six.text_type(c) for c in self.transactions])) + class DeleteStatement(BaseCQLStatement): """ a cql delete statement """ From e7182dfcf4029dc71340ebfbbbe550acfa4a55c4 Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Mon, 20 Oct 2014 10:30:59 -0400 Subject: [PATCH 1016/3726] Converted .transaction() to .iff() --- cqlengine/models.py | 4 ++-- cqlengine/query.py | 12 ++++------ cqlengine/statements.py | 8 +++---- .../statements/test_transaction_statement.py | 20 ++-------------- cqlengine/tests/test_transaction.py | 24 ++++--------------- 5 files changed, 17 insertions(+), 51 deletions(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index d8e0b778f0..69b629782f 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -84,7 +84,7 @@ def transaction_setter(*prepared_transaction, **unprepared_transactions): if len(prepared_transaction) > 0: transactions = prepared_transaction[0] 
else: - transactions = instance.objects.transaction(**unprepared_transactions)._transaction + transactions = instance.objects.iff(**unprepared_transactions)._transaction instance._transaction = transactions return instance @@ -92,7 +92,7 @@ def transaction_setter(*prepared_transaction, **unprepared_transactions): qs = model.__queryset__(model) def transaction_setter(**unprepared_transactions): - transactions = model.objects.transaction(**unprepared_transactions)._transaction + transactions = model.objects.iff(**unprepared_transactions)._transaction qs._transaction = transactions return qs return transaction_setter diff --git a/cqlengine/query.py b/cqlengine/query.py index 2845274efe..cc00d5f1d4 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -25,7 +25,7 @@ class MultipleObjectsReturned(QueryException): pass def check_applied(result): """ check if result contains some column '[applied]' with false value, - if that value is false, it means our light-weight transaction didn't + if that value is false, it means our light-weight iff didn't applied to database. """ if result and '[applied]' in result[0] and result[0]['[applied]'] == False: @@ -92,7 +92,7 @@ def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on :param batch_type: (optional) One of batch type values available through BatchType enum :type batch_type: str or None :param timestamp: (optional) A datetime or timedelta object with desired timestamp to be applied - to the batch transaction. + to the batch iff. 
:type timestamp: datetime or timedelta or None :param consistency: (optional) One of consistency values ("ANY", "ONE", "QUORUM" etc) :type consistency: str or None @@ -409,10 +409,10 @@ def _parse_filter_arg(self, arg): else: raise QueryException("Can't parse '{}'".format(arg)) - def transaction(self, *args, **kwargs): + def iff(self, *args, **kwargs): """Adds IF statements to queryset""" if len([x for x in kwargs.values() if x is None]): - raise CQLEngineException("None values on transaction are not allowed") + raise CQLEngineException("None values on iff are not allowed") clone = copy.deepcopy(self) for operator in args: @@ -425,9 +425,7 @@ def transaction(self, *args, **kwargs): try: column = self.model._get_column(col_name) except KeyError: - if col_name == 'not_exists': - exists = True - elif col_name == 'pk__token': + if col_name == 'pk__token': if not isinstance(val, Token): raise QueryException("Virtual column 'pk__token' may only be compared to Token() values") column = columns._PartitionKeysToken(self.model) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 30ac809a9b..445df8f21c 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -140,7 +140,7 @@ def insert_tuple(self): class TransactionClause(BaseClause): - """ A single variable transaction statement """ + """ A single variable iff statement """ def __unicode__(self): return u'"{}" = %({})s'.format(self.field, self.context_id) @@ -694,7 +694,7 @@ def __init__(self, ttl=ttl, timestamp=timestamp) - # Add transaction statements + # Add iff statements self.transactions = [] for transaction in transactions or []: self.add_transaction_clause(transaction) @@ -726,9 +726,9 @@ def __unicode__(self): def add_transaction_clause(self, clause): """ - Adds a transaction clause to this statement + Adds a iff clause to this statement - :param clause: The clause that will be added to the transaction statement + :param clause: The clause that will be added to the iff statement :type 
clause: TransactionClause """ if not isinstance(clause, TransactionClause): diff --git a/cqlengine/tests/statements/test_transaction_statement.py b/cqlengine/tests/statements/test_transaction_statement.py index 4ead43a3b4..0477cda6d7 100644 --- a/cqlengine/tests/statements/test_transaction_statement.py +++ b/cqlengine/tests/statements/test_transaction_statement.py @@ -3,28 +3,12 @@ from cqlengine.statements import TransactionClause import six -class TestTransactionClause(TestCase): - - def test_not_exists_clause(self): - tc = TransactionClause('not_exists', True) - self.assertEqual('NOT EXISTS', six.text_type(tc)) - self.assertEqual('NOT EXISTS', str(tc)) +class TestTransactionClause(TestCase): def test_normal_transaction(self): tc = TransactionClause('some_value', 23) tc.set_context_id(3) self.assertEqual('"some_value" = %(3)s', six.text_type(tc)) - self.assertEqual('"some_value" = %(3)s', str(tc)) - - def test_equality(self): - tc1 = TransactionClause('some_value', 5) - tc2 = TransactionClause('some_value', 5) - - assert tc1 == tc2 - - tc3 = TransactionClause('not_exists', True) - tc4 = TransactionClause('not_exists', True) - - assert tc3 == tc4 \ No newline at end of file + self.assertEqual('"some_value" = %(3)s', str(tc)) \ No newline at end of file diff --git a/cqlengine/tests/test_transaction.py b/cqlengine/tests/test_transaction.py index 9dbe963252..13a4a59de7 100644 --- a/cqlengine/tests/test_transaction.py +++ b/cqlengine/tests/test_transaction.py @@ -28,22 +28,11 @@ def tearDownClass(cls): super(TestTransaction, cls).tearDownClass() drop_table(TestTransactionModel) - def test_create_uses_transaction(self): - qs = TestTransactionModel.transaction(not_exists=True) - with mock.patch.object(self.session, 'execute') as m: - qs.create(text='blah blah', count=2) - args = m.call_args - self.assertIn('IF NOT EXISTS', args[0][0].query_string) - - def test_queryset_returned_on_create(self): - qs = TestTransactionModel.transaction(not_exists=True) - 
self.assertTrue(isinstance(qs, TestTransactionModel.__queryset__), type(qs)) - def test_update_using_transaction(self): t = TestTransactionModel.create(text='blah blah') t.text = 'new blah' with mock.patch.object(self.session, 'execute') as m: - t.transaction(text='blah blah').save() + t.iff(text='blah blah').save() args = m.call_args self.assertIn('IF "text" = %(0)s', args[0][0].query_string) @@ -51,21 +40,16 @@ def test_update_using_transaction(self): def test_update_failure(self): t = TestTransactionModel.create(text='blah blah') t.text = 'new blah' - t = t.transaction(text='something wrong') + t = t.iff(text='something wrong') self.assertRaises(TransactionException, t.save) - def test_creation_failure(self): - t = TestTransactionModel.create(text='blah blah') - t_clone = TestTransactionModel.transaction(not_exists=True) - self.assertRaises(TransactionException, t_clone.create, id=t.id, count=t.count, text=t.text) - def test_blind_update(self): t = TestTransactionModel.create(text='blah blah') t.text = 'something else' uid = t.id with mock.patch.object(self.session, 'execute') as m: - TestTransactionModel.objects(id=uid).transaction(text='blah blah').update(text='oh hey der') + TestTransactionModel.objects(id=uid).iff(text='blah blah').update(text='oh hey der') args = m.call_args self.assertIn('IF "text" = %(1)s', args[0][0].query_string) @@ -74,5 +58,5 @@ def test_blind_update_fail(self): t = TestTransactionModel.create(text='blah blah') t.text = 'something else' uid = t.id - qs = TestTransactionModel.objects(id=uid).transaction(text='Not dis!') + qs = TestTransactionModel.objects(id=uid).iff(text='Not dis!') self.assertRaises(TransactionException, qs.update, text='this will never work') \ No newline at end of file From 2ba9a99dca2ea155f0df8e9c18491e44fcb8c358 Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Mon, 20 Oct 2014 11:05:34 -0400 Subject: [PATCH 1017/3726] Added documentation. 
Changed .transaction() to .iff(), removed TransactionException and replaced with LWTException --- cqlengine/connection.py | 8 +------- cqlengine/exceptions.py | 1 - cqlengine/models.py | 2 +- cqlengine/query.py | 12 +++--------- cqlengine/tests/base.py | 5 ++++- cqlengine/tests/test_transaction.py | 6 +++--- docs/topics/models.rst | 8 ++++++++ 7 files changed, 20 insertions(+), 22 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 8e465e8d1c..33c9bbe08a 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -5,7 +5,7 @@ from collections import namedtuple from cassandra.cluster import Cluster, NoHostAvailable from cassandra.query import SimpleStatement, Statement -from cqlengine.exceptions import TransactionException +from cqlengine.exceptions import LWTException import six try: @@ -112,12 +112,6 @@ def execute(query, params=None, consistency_level=None): params = params or {} result = session.execute(query, params) - if result and result[0].get('[applied]', True) is False: - result[0].pop('[applied]') - expected = ', '.join('{0}={1}'.format(t.field, t.value) for t in statement.transactions) - actual = ', '.join('{0}={1}'.format(f, v) for f, v in result[0].items()) - message = 'Transaction statement failed: Expected: {0} Actual: {1}'.format(expected, actual) - raise TransactionException(message) return result def get_session(): diff --git a/cqlengine/exceptions.py b/cqlengine/exceptions.py index 75473de681..f40f8af700 100644 --- a/cqlengine/exceptions.py +++ b/cqlengine/exceptions.py @@ -2,7 +2,6 @@ class CQLEngineException(Exception): pass class ModelException(CQLEngineException): pass class ValidationError(CQLEngineException): pass -class TransactionException(CQLEngineException): pass class UndefinedKeyspaceException(CQLEngineException): pass class LWTException(CQLEngineException): pass diff --git a/cqlengine/models.py b/cqlengine/models.py index 69b629782f..5003b3c018 100644 --- a/cqlengine/models.py +++ 
b/cqlengine/models.py @@ -266,7 +266,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass objects = QuerySetDescriptor() ttl = TTLDescriptor() consistency = ConsistencyDescriptor() - transaction = TransactionDescriptor() + iff = TransactionDescriptor() # custom timestamps, see USING TIMESTAMP X timestamp = TimestampDescriptor() diff --git a/cqlengine/query.py b/cqlengine/query.py index cc00d5f1d4..d9bef1aa90 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -5,7 +5,7 @@ from cqlengine.columns import Counter, List, Set from cqlengine.connection import execute -from cqlengine.exceptions import CQLEngineException, ValidationError, TransactionException, LWTException +from cqlengine.exceptions import CQLEngineException, ValidationError, LWTException from cqlengine.functions import Token, BaseQueryFunction, QueryValue, UnicodeMixin #CQL 3 reference: @@ -849,14 +849,8 @@ def _execute(self, q): return self._batch.add_query(q) else: tmp = execute(q, consistency_level=self._consistency) - if self._if_not_exists: + if self._if_not_exists or self._transaction: check_applied(tmp) - if self._transaction and tmp[0].get('[applied]', True) is False: - tmp[0].pop('[applied]') - expected = ', '.join('{0}={1}'.format(t.field, t.value) for t in q.transactions) - actual = ', '.join('{0}={1}'.format(f, v) for f, v in tmp[0].items()) - message = 'Transaction statement failed: Expected: {0} Actual: {1}'.format(expected, actual) - raise TransactionException(message) return tmp def batch(self, batch_obj): @@ -966,7 +960,7 @@ def save(self): if self.instance._has_counter or self.instance._can_update(): return self.update() else: - insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, if_not_exists=self._if_not_exists, transactions=self._transaction) + insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, if_not_exists=self._if_not_exists) for name, col in self.instance._columns.items(): val = 
getattr(self.instance, name, None) if col._val_is_null(val): diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index dea0cc191b..96dda91e74 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -3,8 +3,11 @@ import sys import six from cqlengine.connection import get_session +from cqlengine import connection -CASSANDRA_VERSION = int(os.environ['CASSANDRA_VERSION']) +CASSANDRA_VERSION = 20 #int(os.environ['CASSANDRA_VERSION']) + +connection.setup(['192.168.56.103'], 'cqlengine_test') class BaseCassEngTestCase(TestCase): diff --git a/cqlengine/tests/test_transaction.py b/cqlengine/tests/test_transaction.py index 13a4a59de7..69e6cb602b 100644 --- a/cqlengine/tests/test_transaction.py +++ b/cqlengine/tests/test_transaction.py @@ -2,7 +2,7 @@ from cqlengine.management import sync_table, drop_table from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.models import Model -from cqlengine.exceptions import TransactionException +from cqlengine.exceptions import LWTException from uuid import uuid4 from cqlengine import columns import mock @@ -41,7 +41,7 @@ def test_update_failure(self): t = TestTransactionModel.create(text='blah blah') t.text = 'new blah' t = t.iff(text='something wrong') - self.assertRaises(TransactionException, t.save) + self.assertRaises(LWTException, t.save) def test_blind_update(self): t = TestTransactionModel.create(text='blah blah') @@ -59,4 +59,4 @@ def test_blind_update_fail(self): t.text = 'something else' uid = t.id qs = TestTransactionModel.objects(id=uid).iff(text='Not dis!') - self.assertRaises(TransactionException, qs.update, text='this will never work') \ No newline at end of file + self.assertRaises(LWTException, qs.update, text='this will never work') \ No newline at end of file diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 618838ec04..567e823792 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -207,6 +207,14 @@ Model Methods This method is supported on 
Cassandra 2.0 or later. + .. method:: iff(**values) + + Checks to ensure that the values specified are correct on the Cassandra cluster. + Simply specify the column(s) and the expected value(s). As with if_not_exists, + this incurs a performance cost. + + If the insertion isn't applied, a LWTException is raised + .. method:: update(**values) Performs an update on the model instance. You can pass in values to set on the model From d6eb32fccc6e2e1aa2607b441374ef52ed3a6402 Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Mon, 20 Oct 2014 11:43:32 -0400 Subject: [PATCH 1018/3726] Small bug fixes to .iff() --- cqlengine/query.py | 2 + cqlengine/statements.py | 9 +++-- .../statements/test_transaction_statement.py | 14 ------- cqlengine/tests/test_transaction.py | 39 ++++++++++++++++++- 4 files changed, 45 insertions(+), 19 deletions(-) delete mode 100644 cqlengine/tests/statements/test_transaction_statement.py diff --git a/cqlengine/query.py b/cqlengine/query.py index d9bef1aa90..38762a8a93 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -245,6 +245,8 @@ def _execute(self, q): return self._batch.add_query(q) else: result = execute(q, consistency_level=self._consistency) + if self._transaction: + check_applied(result) return result def __unicode__(self): diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 445df8f21c..ab8b4667f0 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -148,9 +148,6 @@ def __unicode__(self): def insert_tuple(self): return self.field, self.context_id - def update_context(self, ctx): - return super(TransactionClause, self).update_context(ctx) - class ContainerUpdateClause(AssignmentClause): @@ -746,6 +743,12 @@ def get_context(self): def _get_transactions(self): return 'IF {}'.format(' AND '.join([six.text_type(c) for c in self.transactions])) + def update_context_id(self, i): + super(UpdateStatement, self).update_context_id(i) + for transaction in self.transactions: + 
transaction.set_context_id(self.context_counter) + self.context_counter += transaction.get_context_size() + class DeleteStatement(BaseCQLStatement): """ a cql delete statement """ diff --git a/cqlengine/tests/statements/test_transaction_statement.py b/cqlengine/tests/statements/test_transaction_statement.py deleted file mode 100644 index 0477cda6d7..0000000000 --- a/cqlengine/tests/statements/test_transaction_statement.py +++ /dev/null @@ -1,14 +0,0 @@ -__author__ = 'Tim Martin' -from unittest import TestCase -from cqlengine.statements import TransactionClause -import six - - -class TestTransactionClause(TestCase): - - def test_normal_transaction(self): - tc = TransactionClause('some_value', 23) - tc.set_context_id(3) - - self.assertEqual('"some_value" = %(3)s', six.text_type(tc)) - self.assertEqual('"some_value" = %(3)s', str(tc)) \ No newline at end of file diff --git a/cqlengine/tests/test_transaction.py b/cqlengine/tests/test_transaction.py index 69e6cb602b..43eb488f37 100644 --- a/cqlengine/tests/test_transaction.py +++ b/cqlengine/tests/test_transaction.py @@ -4,9 +4,11 @@ from cqlengine.models import Model from cqlengine.exceptions import LWTException from uuid import uuid4 -from cqlengine import columns +from cqlengine import columns, BatchQuery import mock from cqlengine import ALL, BatchQuery +from cqlengine.statements import TransactionClause +import six class TestTransactionModel(Model): @@ -37,6 +39,16 @@ def test_update_using_transaction(self): args = m.call_args self.assertIn('IF "text" = %(0)s', args[0][0].query_string) + def test_update_transaction_success(self): + t = TestTransactionModel.create(text='blah blah', count=5) + id = t.id + t.text = 'new blah' + t.iff(text='blah blah').save() + + updated = TestTransactionModel.objects(id=id).first() + self.assertEqual(updated.count, 5) + self.assertEqual(updated.text, 'new blah') + def test_update_failure(self): t = TestTransactionModel.create(text='blah blah') t.text = 'new blah' @@ -59,4 +71,27 @@ 
def test_blind_update_fail(self): t.text = 'something else' uid = t.id qs = TestTransactionModel.objects(id=uid).iff(text='Not dis!') - self.assertRaises(LWTException, qs.update, text='this will never work') \ No newline at end of file + self.assertRaises(LWTException, qs.update, text='this will never work') + + def test_transaction_clause(self): + tc = TransactionClause('some_value', 23) + tc.set_context_id(3) + + self.assertEqual('"some_value" = %(3)s', six.text_type(tc)) + self.assertEqual('"some_value" = %(3)s', str(tc)) + + def test_batch_update_transaction(self): + t = TestTransactionModel.create(text='something', count=5) + id = t.id + with BatchQuery() as b: + t.batch(b).iff(count=5).update(text='something else') + + updated = TestTransactionModel.objects(id=id).first() + self.assertEqual(updated.text, 'something else') + + b = BatchQuery() + updated.batch(b).iff(count=6).update(text='and another thing') + self.assertRaises(LWTException, b.execute) + + updated = TestTransactionModel.objects(id=id).first() + self.assertEqual(updated.text, 'something else') \ No newline at end of file From 1fc6c6422fbcc94e74b638e74cc7a38a1c2fdaff Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Mon, 20 Oct 2014 11:47:03 -0400 Subject: [PATCH 1019/3726] Included code sample in docs for iff --- docs/topics/models.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/topics/models.rst b/docs/topics/models.rst index 567e823792..bd50d95c05 100644 --- a/docs/topics/models.rst +++ b/docs/topics/models.rst @@ -215,6 +215,13 @@ Model Methods If the insertion isn't applied, a LWTException is raised + .. code-block:: + t = TestTransactionModel(text='some text', count=5) + try: + t.iff(count=5).update('other text') + except LWTException as e: + # handle failure + .. method:: update(**values) Performs an update on the model instance. 
You can pass in values to set on the model From d65bb7919751b2662fb85ac06868c804f33d963f Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Mon, 20 Oct 2014 11:51:31 -0400 Subject: [PATCH 1020/3726] =?UTF-8?q?Weird=E2=80=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- cqlengine/VERSION | 4 ---- 1 file changed, 4 deletions(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 282e22cc86..1cf0537c34 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1,5 +1 @@ -<<<<<<< HEAD 0.19.0 -======= -0.18.2 ->>>>>>> FETCH_HEAD From 92c5ce317a1c7ae009a7a80e1b0b8af885290942 Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Mon, 20 Oct 2014 11:55:42 -0400 Subject: [PATCH 1021/3726] Small cleanups --- cqlengine/connection.py | 2 +- cqlengine/query.py | 4 ++-- cqlengine/tests/base.py | 5 +---- setup.py | 4 ++-- 4 files changed, 6 insertions(+), 9 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 33c9bbe08a..39c9e9d295 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -89,7 +89,7 @@ def setup( def execute(query, params=None, consistency_level=None): handle_lazy_connect() - statement = query + if not session: raise CQLEngineException("It is required to setup() cqlengine before executing queries") diff --git a/cqlengine/query.py b/cqlengine/query.py index 38762a8a93..2fbcc47e65 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -25,7 +25,7 @@ class MultipleObjectsReturned(QueryException): pass def check_applied(result): """ check if result contains some column '[applied]' with false value, - if that value is false, it means our light-weight iff didn't + if that value is false, it means our light-weight transaction didn't applied to database. 
""" if result and '[applied]' in result[0] and result[0]['[applied]'] == False: @@ -92,7 +92,7 @@ def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on :param batch_type: (optional) One of batch type values available through BatchType enum :type batch_type: str or None :param timestamp: (optional) A datetime or timedelta object with desired timestamp to be applied - to the batch iff. + to the batch transaction. :type timestamp: datetime or timedelta or None :param consistency: (optional) One of consistency values ("ANY", "ONE", "QUORUM" etc) :type consistency: str or None diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index 96dda91e74..dea0cc191b 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -3,11 +3,8 @@ import sys import six from cqlengine.connection import get_session -from cqlengine import connection -CASSANDRA_VERSION = 20 #int(os.environ['CASSANDRA_VERSION']) - -connection.setup(['192.168.56.103'], 'cqlengine_test') +CASSANDRA_VERSION = int(os.environ['CASSANDRA_VERSION']) class BaseCassEngTestCase(TestCase): diff --git a/setup.py b/setup.py index 0b9a9af35e..cf35583c32 100644 --- a/setup.py +++ b/setup.py @@ -17,7 +17,7 @@ """ setup( - name='vk-cqlengine', + name='cqlengine', version=version, description='Cassandra CQL 3 Object Mapper for Python', long_description=long_desc, @@ -35,7 +35,7 @@ author='Blake Eggleston, Jon Haddad', author_email='bdeggleston@gmail.com, jon@jonhaddad.com', url='https://github.com/cqlengine/cqlengine', - license='Private', + license='BSD', packages=find_packages(), include_package_data=True, ) From 5f2f1f7d2353fd8f3940ab20c6ab575dcf7df265 Mon Sep 17 00:00:00 2001 From: timmartin19 Date: Mon, 20 Oct 2014 12:02:04 -0400 Subject: [PATCH 1022/3726] Removed unnecessary import statement --- cqlengine/connection.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 39c9e9d295..444400d5b0 100644 --- 
a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -5,7 +5,6 @@ from collections import namedtuple from cassandra.cluster import Cluster, NoHostAvailable from cassandra.query import SimpleStatement, Statement -from cqlengine.exceptions import LWTException import six try: From 72a17063bf9eb1e2ac9b46866d09b93bfa3e8d88 Mon Sep 17 00:00:00 2001 From: Alon Diamant Date: Mon, 27 Oct 2014 13:44:19 +0200 Subject: [PATCH 1023/3726] Fix to documentation of BatchQuery --- cqlengine/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/query.py b/cqlengine/query.py index fd2f0072cf..7265f7c7db 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -96,7 +96,7 @@ def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on to the batch transaction. :type timestamp: datetime or timedelta or None :param consistency: (optional) One of consistency values ("ANY", "ONE", "QUORUM" etc) - :type consistency: str or None + :type consistency: The :class:`.ConsistencyLevel` to be used for the batch query, or None. :param execute_on_exception: (Defaults to False) Indicates that when the BatchQuery instance is used as a context manager the queries accumulated within the context must be executed despite encountering an error within the context. By default, any exception raised from within From fa82a8755f070318cec3df476e4046fbc1fbcdd8 Mon Sep 17 00:00:00 2001 From: "Andrei V. 
Ostapenko" Date: Tue, 4 Nov 2014 17:53:03 +0200 Subject: [PATCH 1024/3726] Fixes select schema peers query Extends select schema peers query so it selects 'peer' field and it can be used if rpc_address='0.0.0.0' --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 4e099b1d72..ea150d0681 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1713,7 +1713,7 @@ class ControlConnection(object): _SELECT_PEERS = "SELECT peer, data_center, rack, tokens, rpc_address, schema_version FROM system.peers" _SELECT_LOCAL = "SELECT cluster_name, data_center, rack, tokens, partitioner, schema_version FROM system.local WHERE key='local'" - _SELECT_SCHEMA_PEERS = "SELECT rpc_address, schema_version FROM system.peers" + _SELECT_SCHEMA_PEERS = "SELECT rpc_address, peer, schema_version FROM system.peers" _SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'" _is_shutdown = False From bb7a3379816bd240f2bab9f72ed13233bd72ceac Mon Sep 17 00:00:00 2001 From: Lucas Chi Date: Fri, 7 Nov 2014 15:31:25 -0500 Subject: [PATCH 1025/3726] adds tests for preserving None in bool validation --- cqlengine/tests/columns/test_validation.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index dc14eb6150..9c327e4eba 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -99,7 +99,22 @@ def test_default_is_set(self): tmp2 = self.BoolDefaultValueTest.get(test_id=1) self.assertEqual(True, tmp2.stuff) +class TestBoolValidation(BaseCassEngTestCase): + class BoolValidationTest(Model): + __keyspace__ = 'test' + test_id = Integer(primary_key=True) + bool_column = Boolean() + + @classmethod + def setUpClass(cls): + super(TestBoolValidation, cls).setUpClass() + sync_table(cls.BoolValidationTest) + + def 
test_validation_preserves_none(self): + test_obj = self.BoolValidationTest(test_id=1) + test_obj.validate() + self.assertIsNone(test_obj.bool_column) class TestVarInt(BaseCassEngTestCase): class VarIntTest(Model): From c8d68bb5053abb6da846505057ad46599a88c5e6 Mon Sep 17 00:00:00 2001 From: Lucas Chi Date: Fri, 7 Nov 2014 16:26:18 -0500 Subject: [PATCH 1026/3726] none check before coercing to bool --- cqlengine/columns.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 056853ab98..786dd1ee4f 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -457,7 +457,11 @@ class Boolean(Column): def validate(self, value): """ Always returns a Python boolean. """ value = super(Boolean, self).validate(value) - return bool(value) + + if value is not None: + value = bool(value) + + return value def to_python(self, value): return self.validate(value) From cdcb78d378c51b5c0721b4f6051a48c1d2e500e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20St=C3=BChrk?= Date: Wed, 12 Nov 2014 12:50:21 +0100 Subject: [PATCH 1027/3726] Fix typo in getting started guide. --- docs/getting_started.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 65969f884c..1d7ce2acc6 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -114,7 +114,7 @@ examples are equivalent: .. 
code-block:: python rows = session.execute('SELECT name, age, email FROM users') - for row in row: + for row in rows: print row[0], row[1], row[2] If you prefer another result format, such as a ``dict`` per row, you From 0dc60487da94f618832121d579d7ec72a47fcfed Mon Sep 17 00:00:00 2001 From: Lucas Chi Date: Wed, 12 Nov 2014 16:22:25 -0500 Subject: [PATCH 1028/3726] fixes blind set add w/ empty set --- cqlengine/statements.py | 27 ++++++++++++------- .../tests/statements/test_update_statement.py | 12 ++++++++- 2 files changed, 29 insertions(+), 10 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index a6cdf4b3e7..7737c3dabd 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -173,15 +173,18 @@ def __init__(self, field, value, operation=None, previous=None, column=None): def __unicode__(self): qs = [] ctx_id = self.context_id - if self.previous is None and not (self._assignments or self._additions or self._removals): + if (self.previous is None and + self._assignments is None and + self._additions is None and + self._removals is None): qs += ['"{}" = %({})s'.format(self.field, ctx_id)] - if self._assignments: + if self._assignments is not None: qs += ['"{}" = %({})s'.format(self.field, ctx_id)] ctx_id += 1 - if self._additions: + if self._additions is not None: qs += ['"{0}" = "{0}" + %({1})s'.format(self.field, ctx_id)] ctx_id += 1 - if self._removals: + if self._removals is not None: qs += ['"{0}" = "{0}" - %({1})s'.format(self.field, ctx_id)] return ', '.join(qs) @@ -204,22 +207,28 @@ def _analyze(self): def get_context_size(self): if not self._analyzed: self._analyze() - if self.previous is None and not (self._assignments or self._additions or self._removals): + if (self.previous is None and + self._assignments is None and + self._additions is None and + self._removals is None): return 1 return int(bool(self._assignments)) + int(bool(self._additions)) + int(bool(self._removals)) def update_context(self, ctx): if not 
self._analyzed: self._analyze() ctx_id = self.context_id - if self.previous is None and not (self._assignments or self._additions or self._removals): + if (self.previous is None and + self._assignments is None and + self._additions is None and + self._removals is None): ctx[str(ctx_id)] = self._to_database({}) - if self._assignments: + if self._assignments is not None: ctx[str(ctx_id)] = self._to_database(self._assignments) ctx_id += 1 - if self._additions: + if self._additions is not None: ctx[str(ctx_id)] = self._to_database(self._additions) ctx_id += 1 - if self._removals: + if self._removals is not None: ctx[str(ctx_id)] = self._to_database(self._removals) diff --git a/cqlengine/tests/statements/test_update_statement.py b/cqlengine/tests/statements/test_update_statement.py index f75a953008..04e40783eb 100644 --- a/cqlengine/tests/statements/test_update_statement.py +++ b/cqlengine/tests/statements/test_update_statement.py @@ -1,6 +1,7 @@ from unittest import TestCase -from cqlengine.statements import UpdateStatement, WhereClause, AssignmentClause +from cqlengine.columns import Set from cqlengine.operators import * +from cqlengine.statements import UpdateStatement, WhereClause, AssignmentClause, SetUpdateClause import six class UpdateStatementTests(TestCase): @@ -40,3 +41,12 @@ def test_additional_rendering(self): us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertIn('USING TTL 60', six.text_type(us)) + def test_update_set_add(self): + us = UpdateStatement('table') + us.add_assignment_clause(SetUpdateClause('a', Set.Quoter({1}), operation='add')) + self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') + + def test_update_set_add_does_not_assign(self): + us = UpdateStatement('table') + us.add_assignment_clause(SetUpdateClause('a', Set.Quoter(set()), operation='add')) + self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') From ccf694c20142e41034b43e5e33bf373e107adba3 Mon Sep 17 00:00:00 2001 From: Lucas 
Chi Date: Wed, 12 Nov 2014 16:22:46 -0500 Subject: [PATCH 1029/3726] remove unused import --- cqlengine/tests/columns/test_validation.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index dc14eb6150..623995818e 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -30,8 +30,6 @@ from cqlengine.management import sync_table, drop_table from cqlengine.models import Model -import sys - class TestDatetime(BaseCassEngTestCase): class DatetimeTest(Model): From c388365236ceee4184b42a91c470202afafb6657 Mon Sep 17 00:00:00 2001 From: Lucas Chi Date: Wed, 12 Nov 2014 17:03:37 -0500 Subject: [PATCH 1030/3726] fix list append and prepend with empty lists --- cqlengine/statements.py | 8 ++++---- .../tests/statements/test_update_statement.py | 17 +++++++++++++++-- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 7737c3dabd..633472d0ba 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -248,11 +248,11 @@ def __unicode__(self): qs += ['"{}" = %({})s'.format(self.field, ctx_id)] ctx_id += 1 - if self._prepend: + if self._prepend is not None: qs += ['"{0}" = %({1})s + "{0}"'.format(self.field, ctx_id)] ctx_id += 1 - if self._append: + if self._append is not None: qs += ['"{0}" = "{0}" + %({1})s'.format(self.field, ctx_id)] return ', '.join(qs) @@ -267,13 +267,13 @@ def update_context(self, ctx): if self._assignments is not None: ctx[str(ctx_id)] = self._to_database(self._assignments) ctx_id += 1 - if self._prepend: + if self._prepend is not None: # CQL seems to prepend element at a time, starting # with the element at idx 0, we can either reverse # it here, or have it inserted in reverse ctx[str(ctx_id)] = self._to_database(list(reversed(self._prepend))) ctx_id += 1 - if self._append: + if self._append is not None: ctx[str(ctx_id)] = 
self._to_database(self._append) def _analyze(self): diff --git a/cqlengine/tests/statements/test_update_statement.py b/cqlengine/tests/statements/test_update_statement.py index 04e40783eb..e73961141b 100644 --- a/cqlengine/tests/statements/test_update_statement.py +++ b/cqlengine/tests/statements/test_update_statement.py @@ -1,9 +1,12 @@ from unittest import TestCase -from cqlengine.columns import Set +from cqlengine.columns import Set, List from cqlengine.operators import * -from cqlengine.statements import UpdateStatement, WhereClause, AssignmentClause, SetUpdateClause +from cqlengine.statements import (UpdateStatement, WhereClause, + AssignmentClause, SetUpdateClause, + ListUpdateClause) import six + class UpdateStatementTests(TestCase): def test_table_rendering(self): @@ -50,3 +53,13 @@ def test_update_set_add_does_not_assign(self): us = UpdateStatement('table') us.add_assignment_clause(SetUpdateClause('a', Set.Quoter(set()), operation='add')) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') + + def test_update_list_prepend_with_empty_list(self): + us = UpdateStatement('table') + us.add_assignment_clause(ListUpdateClause('a', List.Quoter([]), operation='prepend')) + self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s + "a"') + + def test_update_list_append_with_empty_list(self): + us = UpdateStatement('table') + us.add_assignment_clause(ListUpdateClause('a', List.Quoter([]), operation='append')) + self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') From c2590b46b85cdae1a6e1431481f05ba92b373221 Mon Sep 17 00:00:00 2001 From: Lucas Chi Date: Wed, 12 Nov 2014 17:07:55 -0500 Subject: [PATCH 1031/3726] add set removal test and update test names --- cqlengine/tests/statements/test_update_statement.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/statements/test_update_statement.py b/cqlengine/tests/statements/test_update_statement.py index e73961141b..ab5aeec014 
100644 --- a/cqlengine/tests/statements/test_update_statement.py +++ b/cqlengine/tests/statements/test_update_statement.py @@ -49,11 +49,16 @@ def test_update_set_add(self): us.add_assignment_clause(SetUpdateClause('a', Set.Quoter({1}), operation='add')) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') - def test_update_set_add_does_not_assign(self): + def test_update_empty_set_add_does_not_assign(self): us = UpdateStatement('table') us.add_assignment_clause(SetUpdateClause('a', Set.Quoter(set()), operation='add')) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') + def test_update_empty_set_removal_does_not_assign(self): + us = UpdateStatement('table') + us.add_assignment_clause(SetUpdateClause('a', Set.Quoter(set()), operation='remove')) + self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" - %(0)s') + def test_update_list_prepend_with_empty_list(self): us = UpdateStatement('table') us.add_assignment_clause(ListUpdateClause('a', List.Quoter([]), operation='prepend')) From 65c7680e56cbe66065a6f2d5faa4df6d35f3a615 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 13 Nov 2014 16:56:20 -0800 Subject: [PATCH 1032/3726] allow for multiple callback and errbacks --- cassandra/cluster.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index fa5defd1ac..6bb8b77b8f 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2334,8 +2334,8 @@ class ResponseFuture(object): _final_result = _NOT_SET _final_exception = None _query_trace = None - _callback = None - _errback = None + _callbacks = None + _errbacks = None _current_host = None _current_pool = None _connection = None @@ -2358,6 +2358,8 @@ def __init__(self, session, message, query, default_timeout=None, metrics=None, self._make_query_plan() self._event = Event() self._errors = {} + self._callbacks = [] + self._errbacks = [] def _make_query_plan(self): # convert 
the list/generator/etc to an iterator so that subsequent @@ -2657,8 +2659,10 @@ def _set_final_result(self, response): self._final_result = response self._event.set() - if self._callback: - fn, args, kwargs = self._callback + + # apply each callback + for callback in self._callbacks: + fn, args, kwargs = callback fn(response, *args, **kwargs) def _set_final_exception(self, response): @@ -2668,8 +2672,9 @@ def _set_final_exception(self, response): with self._callback_lock: self._final_exception = response self._event.set() - if self._errback: - fn, args, kwargs = self._errback + + for errback in self._errbacks: + fn, args, kwargs = errback fn(response, *args, **kwargs) def _retry(self, reuse_connection, consistency_level): @@ -2797,7 +2802,7 @@ def add_callback(self, fn, *args, **kwargs): if self._final_result is not _NOT_SET: run_now = True else: - self._callback = (fn, args, kwargs) + self._callbacks.append((fn, args, kwargs)) if run_now: fn(self._final_result, *args, **kwargs) return self @@ -2813,7 +2818,7 @@ def add_errback(self, fn, *args, **kwargs): if self._final_exception: run_now = True else: - self._errback = (fn, args, kwargs) + self._errbacks.append((fn, args, kwargs)) if run_now: fn(self._final_exception, *args, **kwargs) return self @@ -2848,8 +2853,8 @@ def add_callbacks(self, callback, errback, def clear_callbacks(self): with self._callback_lock: - self._callback = None - self._errback = None + self._callback = [] + self._errback = [] def __str__(self): result = "(no result yet)" if self._final_result is _NOT_SET else self._final_result From 9a1b1f0230727c061185f3c34f925a0633e0e0bf Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 14 Nov 2014 12:04:52 -0600 Subject: [PATCH 1033/3726] Fix routing key encoding for compound primary keys - make component size big-endian - include component string size in format string --- cassandra/query.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/query.py b/cassandra/query.py 
index 11161afe95..791b00692d 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -507,7 +507,8 @@ def routing_key(self): components = [] for statement_index in routing_indexes: val = self.values[statement_index] - components.append(struct.pack("HsB", len(val), val, 0)) + l = len(val) + components.append(struct.pack(">H%dsB" % l, l, val, 0)) self._routing_key = b"".join(components) From e9b69a62b24d731c0425d5d52a774fa6334b6217 Mon Sep 17 00:00:00 2001 From: Alon Diamant Date: Fri, 14 Nov 2014 20:06:13 +0200 Subject: [PATCH 1034/3726] Added LOCAL_ONE consistency level constant --- cqlengine/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py index ef273d9664..0961afd333 100644 --- a/cqlengine/__init__.py +++ b/cqlengine/__init__.py @@ -27,6 +27,7 @@ TWO = ConsistencyLevel.TWO THREE = ConsistencyLevel.THREE QUORUM = ConsistencyLevel.QUORUM +LOCAL_ONE = ConsistencyLevel.LOCAL_ONE LOCAL_QUORUM = ConsistencyLevel.LOCAL_QUORUM EACH_QUORUM = ConsistencyLevel.EACH_QUORUM ALL = ConsistencyLevel.ALL From c3151ee4acf95b5518aff08c3b7d321573568ba8 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 14 Nov 2014 12:31:25 -0800 Subject: [PATCH 1035/3726] for some reason int_map was hard coded --- cqlengine/statements.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index d73cf1a99e..4a6f318ffc 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -378,7 +378,7 @@ def __unicode__(self): ctx_id = self.context_id if self.previous is None and not self._updates: - qs += ['"int_map" = %({})s'.format(ctx_id)] + qs += ['"{}" = %({})s'.format(self.field, ctx_id)] else: for _ in self._updates or []: qs += ['"{}"[%({})s] = %({})s'.format(self.field, ctx_id, ctx_id + 1)] From df6aa48696face08f024e4034b3a08b90aa47f68 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Tue, 18 Nov 2014 12:05:02 -0800 Subject: [PATCH 1036/3726] version bump --- 
cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 1cf0537c34..5a03fb737b 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.19.0 +0.20.0 From df5790f8225c5d4aa38050cb496ae2847f196ff8 Mon Sep 17 00:00:00 2001 From: Tyler Hobbs Date: Tue, 18 Nov 2014 15:35:14 -0600 Subject: [PATCH 1037/3726] Temporary fix for CASSANDRA-8326 This has not been reviewed or well tested. It's a temporary fix until a proper patch can be put together and tested. --- cassandra/metadata.py | 51 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 45 insertions(+), 6 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index ec9062a712..4c6b7ec898 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -226,7 +226,18 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): num_column_name_components = len(column_name_types) last_col = column_name_types[-1] - column_aliases = json.loads(row["column_aliases"]) + column_aliases = row.get("column_aliases", None) + + clustering_rows = [row for row in col_rows.get(cfname) + if row.get('type', None) == "clustering_key"] + if len(clustering_rows) > 1: + clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index')) + + if column_aliases is not None: + column_aliases = json.loads(column_aliases) + else: + column_aliases = [row.get('column_name') for row in clustering_rows] + if is_composite: if issubclass(last_col, types.ColumnToCollectionType): # collections @@ -258,10 +269,25 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): # partition key key_aliases = row.get("key_aliases") - key_aliases = json.loads(key_aliases) if key_aliases else [] + partition_rows = [row for row in col_rows.get(cfname) + if row.get('type', None) == "partition_key"] + + if len(partition_rows) > 1: + partition_rows = sorted(partition_rows, key=lambda row: 
row.get('component_index')) + + if key_aliases is not None: + json.loads(key_aliases) + else: + # In 2.0+, we can use the 'type' column. In 3.0+, we have to use it. + key_aliases = [row.get('column_name') for row in partition_rows] + + key_validator = row.get("key_validator") + if key_validator is not None: + key_type = types.lookup_casstype(key_validator) + key_types = key_type.subtypes if issubclass(key_type, types.CompositeType) else [key_type] + else: + key_types = [types.lookup_casstype(row.get('validator')) for row in partition_rows] - key_type = types.lookup_casstype(row["key_validator"]) - key_types = key_type.subtypes if issubclass(key_type, types.CompositeType) else [key_type] for i, col_type in enumerate(key_types): if len(key_aliases) > i: column_name = key_aliases[i] @@ -287,11 +313,24 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): # value alias (if present) if has_value: - validator = types.lookup_casstype(row["default_validator"]) + value_alias_rows = [row for row in col_rows.get(cfname) + if row.get('type', None) == "compact_value"] + if not key_aliases: # TODO are we checking the right thing here? value_alias = "value" else: - value_alias = row["value_alias"] + value_alias = row.get("value_alias", None) + if value_alias is None: + # In 2.0+, we can use the 'type' column. In 3.0+, we have to use it. 
+ assert len(value_alias_rows) == 1 + value_alias = value_alias_rows[0].get('column_name') + + default_validator = row.get("default_validator") + if default_validator: + validator = types.lookup_casstype(default_validator) + else: + assert len(value_alias_rows) == 1 + validator = types.lookup_casstype(value_alias_rows[0].get('validator')) col = ColumnMetadata(table_meta, value_alias, validator) table_meta.columns[value_alias] = col From 738f166cec6604105894dc3dd7ae9d1755a179c3 Mon Sep 17 00:00:00 2001 From: Roey Berman Date: Mon, 11 Aug 2014 16:58:23 +0300 Subject: [PATCH 1038/3726] add query timeout feature --- cqlengine/connection.py | 28 +++++------- cqlengine/models.py | 21 +++++++-- cqlengine/query.py | 39 +++++++++++----- cqlengine/tests/query/test_batch_query.py | 16 +++++++ cqlengine/tests/query/test_queryset.py | 55 +++++++++++++++++++++-- docs/topics/queryset.rst | 47 +++++++++++++++++++ 6 files changed, 171 insertions(+), 35 deletions(-) diff --git a/cqlengine/connection.py b/cqlengine/connection.py index 444400d5b0..34f8da3acb 100644 --- a/cqlengine/connection.py +++ b/cqlengine/connection.py @@ -1,26 +1,21 @@ #http://pypi.python.org/pypi/cql/1.0.4 #http://code.google.com/a/apache-extras.org/p/cassandra-dbapi2 / #http://cassandra.apache.org/doc/cql/CQL.html - +from __future__ import absolute_import from collections import namedtuple -from cassandra.cluster import Cluster, NoHostAvailable -from cassandra.query import SimpleStatement, Statement -import six - -try: - import Queue as queue -except ImportError: - # python 3 - import queue +from cassandra.cluster import Cluster, _NOT_SET, NoHostAvailable +from cassandra.query import SimpleStatement, Statement, dict_factory +from cqlengine.statements import BaseCQLStatement +from cqlengine.exceptions import CQLEngineException, UndefinedKeyspaceException +from cassandra import ConsistencyLevel +import six import logging -from cqlengine.exceptions import CQLEngineException, UndefinedKeyspaceException -from 
cassandra import ConsistencyLevel -from cqlengine.statements import BaseCQLStatement -from cassandra.query import dict_factory LOG = logging.getLogger('cqlengine.cql') +NOT_SET = _NOT_SET # required for passing timeout to Session.execute + class CQLConnectionError(CQLEngineException): pass @@ -85,7 +80,8 @@ def setup( raise session.row_factory = dict_factory -def execute(query, params=None, consistency_level=None): + +def execute(query, params=None, consistency_level=None, timeout=NOT_SET): handle_lazy_connect() @@ -109,7 +105,7 @@ def execute(query, params=None, consistency_level=None): LOG.info(query.query_string) params = params or {} - result = session.execute(query, params) + result = session.execute(query, params, timeout=timeout) return result diff --git a/cqlengine/models.py b/cqlengine/models.py index 5003b3c018..d88836b1fc 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -4,7 +4,7 @@ from cqlengine import columns from cqlengine.exceptions import ModelException, CQLEngineException, ValidationError -from cqlengine.query import ModelQuerySet, DMLQuery, AbstractQueryableColumn +from cqlengine.query import ModelQuerySet, DMLQuery, AbstractQueryableColumn, NOT_SET from cqlengine.query import DoesNotExist as _DoesNotExist from cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned @@ -342,6 +342,7 @@ def __init__(self, **values): # that update should be used when persisting changes self._is_persisted = False self._batch = None + self._timeout = NOT_SET def __repr__(self): @@ -564,6 +565,11 @@ def filter(cls, *args, **kwargs): def get(cls, *args, **kwargs): return cls.objects.get(*args, **kwargs) + def timeout(self, timeout): + assert self._batch is None, 'Setting both timeout and batch is not supported' + self._timeout = timeout + return self + def save(self): # handle polymorphic models if self._is_polymorphic: @@ -580,7 +586,8 @@ def save(self): timestamp=self._timestamp, consistency=self.__consistency__, 
if_not_exists=self._if_not_exists, - transaction=self._transaction).save() + transaction=self._transaction, + timeout=self._timeout).save() #reset the value managers for v in self._values.values(): @@ -619,7 +626,8 @@ def update(self, **values): ttl=self._ttl, timestamp=self._timestamp, consistency=self.__consistency__, - transaction=self._transaction).update() + transaction=self._transaction, + timeout=self._timeout).update() #reset the value managers for v in self._values.values(): @@ -633,7 +641,11 @@ def update(self, **values): def delete(self): """ Deletes this instance """ - self.__dmlquery__(self.__class__, self, batch=self._batch, timestamp=self._timestamp, consistency=self.__consistency__).delete() + self.__dmlquery__(self.__class__, self, + batch=self._batch, + timestamp=self._timestamp, + consistency=self.__consistency__, + timeout=self._timeout).delete() def get_changed_columns(self): """ returns a list of the columns that have been updated since instantiation or save """ @@ -644,6 +656,7 @@ def _class_batch(cls, batch): return cls.objects.batch(batch) def _inst_batch(self, batch): + assert self._timeout is NOT_SET, 'Setting both timeout and batch is not supported' self._batch = batch return self diff --git a/cqlengine/query.py b/cqlengine/query.py index b8c563e32a..5f4c2cb593 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -1,10 +1,12 @@ +from __future__ import absolute_import import copy import time from datetime import datetime, timedelta from cqlengine import BaseContainerColumn, Map, columns from cqlengine.columns import Counter, List, Set -from cqlengine.connection import execute +from .connection import execute, NOT_SET + from cqlengine.exceptions import CQLEngineException, ValidationError, LWTException from cqlengine.functions import Token, BaseQueryFunction, QueryValue, UnicodeMixin @@ -87,7 +89,8 @@ class BatchQuery(object): """ _consistency = None - def __init__(self, batch_type=None, timestamp=None, consistency=None, 
execute_on_exception=False): + def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on_exception=False, + timeout=NOT_SET): """ :param batch_type: (optional) One of batch type values available through BatchType enum :type batch_type: str or None @@ -101,10 +104,9 @@ def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on encountering an error within the context. By default, any exception raised from within the context scope will cause the batched queries not to be executed. :type execute_on_exception: bool - :param callbacks: A list of functions to be executed after the batch executes. Note, that if the batch - does not execute, the callbacks are not executed. This, thus, effectively is a list of "on success" - callback handlers. If defined, must be a collection of callables. - :type callbacks: list or set or tuple + :param timeout: (optional) Timeout for the entire batch (in seconds), if not specified fallback + to default session timeout + :type timeout: float or None """ self.queries = [] self.batch_type = batch_type @@ -113,6 +115,7 @@ def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on self.timestamp = timestamp self._consistency = consistency self._execute_on_exception = execute_on_exception + self._timeout = timeout self._callbacks = [] def add_query(self, query): @@ -181,7 +184,7 @@ def execute(self): query_list.append('APPLY BATCH;') - tmp = execute('\n'.join(query_list), parameters, self._consistency) + tmp = execute('\n'.join(query_list), parameters, self._consistency, self._timeout) check_applied(tmp) self.queries = [] @@ -235,6 +238,7 @@ def __init__(self, model): self._consistency = None self._timestamp = None self._if_not_exists = False + self._timeout = NOT_SET @property def column_family_name(self): @@ -244,7 +248,7 @@ def _execute(self, q): if self._batch: return self._batch.add_query(q) else: - result = execute(q, consistency_level=self._consistency) + result = 
execute(q, consistency_level=self._consistency, timeout=self._timeout) if self._transaction: check_applied(result) return result @@ -269,6 +273,8 @@ def __deepcopy__(self, memo): # fly off into other batch instances which are never # executed, thx @dokai clone.__dict__[k] = self._batch + elif k == '_timeout': + clone.__dict__[k] = self._timeout else: clone.__dict__[k] = copy.deepcopy(v, memo) @@ -656,6 +662,15 @@ def __eq__(self, q): def __ne__(self, q): return not (self != q) + def timeout(self, timeout): + """ + :param timeout: Timeout for the query (in seconds) + :type timeout: float or None + """ + clone = copy.deepcopy(self) + clone._timeout = timeout + return clone + class ResultObject(dict): """ @@ -835,7 +850,8 @@ class DMLQuery(object): _timestamp = None _if_not_exists = False - def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None, if_not_exists=False, transaction=None): + def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None, + if_not_exists=False, transaction=None, timeout=NOT_SET): self.model = model self.column_family_name = self.model.column_family_name() self.instance = instance @@ -845,12 +861,13 @@ def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, self._timestamp = timestamp self._if_not_exists = if_not_exists self._transaction = transaction + self._timeout = timeout def _execute(self, q): if self._batch: return self._batch.add_query(q) else: - tmp = execute(q, consistency_level=self._consistency) + tmp = execute(q, consistency_level=self._consistency, timeout=self._timeout) if self._if_not_exists or self._transaction: check_applied(tmp) return tmp @@ -994,5 +1011,3 @@ def delete(self): col.to_database(getattr(self.instance, name)) )) self._execute(ds) - - diff --git a/cqlengine/tests/query/test_batch_query.py b/cqlengine/tests/query/test_batch_query.py index 9f3ecb5d6a..74d2724a53 100644 --- 
a/cqlengine/tests/query/test_batch_query.py +++ b/cqlengine/tests/query/test_batch_query.py @@ -3,9 +3,13 @@ from uuid import uuid4 import random from cqlengine import Model, columns +from cqlengine.connection import NOT_SET from cqlengine.management import drop_table, sync_table from cqlengine.query import BatchQuery, DMLQuery from cqlengine.tests.base import BaseCassEngTestCase +from cassandra.cluster import Session +import mock + class TestMultiKeyModel(Model): __keyspace__ = 'test' @@ -169,3 +173,15 @@ def test_batch_execute_on_exception_skips_if_not_specified(self): # should be 0 because the batch should not execute self.assertEqual(0, len(obj)) + + def test_batch_execute_timeout(self): + with mock.patch.object(Session, 'execute', autospec=True) as mock_execute: + with BatchQuery(timeout=1) as b: + BatchQueryLogModel.batch(b).create(k=2, v=2) + mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=1) + + def test_batch_execute_no_timeout(self): + with mock.patch.object(Session, 'execute', autospec=True) as mock_execute: + with BatchQuery() as b: + BatchQueryLogModel.batch(b).create(k=2, v=2) + mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=NOT_SET) diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 6482cc7f87..00b1be49ab 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -1,15 +1,16 @@ +from __future__ import absolute_import from datetime import datetime import time from unittest import TestCase, skipUnless from uuid import uuid1, uuid4 import uuid +from cassandra.cluster import Session from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.connection import NOT_SET import mock -from cqlengine.exceptions import ModelException from cqlengine import functions -from cqlengine.management import sync_table, drop_table, sync_table -from cqlengine.management import drop_table +from cqlengine.management import 
sync_table, drop_table from cqlengine.models import Model from cqlengine import columns from cqlengine import query @@ -707,3 +708,51 @@ class PagingTest(Model): assert len(results) == 2 +class ModelQuerySetTimeoutTestCase(BaseQuerySetUsage): + def test_default_timeout(self): + with mock.patch.object(Session, 'execute', autospec=True) as mock_execute: + list(TestModel.objects()) + mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=NOT_SET) + + def test_float_timeout(self): + with mock.patch.object(Session, 'execute', autospec=True) as mock_execute: + list(TestModel.objects().timeout(0.5)) + mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=0.5) + + def test_none_timeout(self): + with mock.patch.object(Session, 'execute', autospec=True) as mock_execute: + list(TestModel.objects().timeout(None)) + mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=None) + + +class DMLQueryTimeoutTestCase(BaseQuerySetUsage): + def setUp(self): + self.model = TestModel(test_id=1, attempt_id=1, description='timeout test') + super(DMLQueryTimeoutTestCase, self).setUp() + + def test_default_timeout(self): + with mock.patch.object(Session, 'execute', autospec=True) as mock_execute: + self.model.save() + mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=NOT_SET) + + def test_float_timeout(self): + with mock.patch.object(Session, 'execute', autospec=True) as mock_execute: + self.model.timeout(0.5).save() + mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=0.5) + + def test_none_timeout(self): + with mock.patch.object(Session, 'execute', autospec=True) as mock_execute: + self.model.timeout(None).save() + mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=None) + + def test_timeout_then_batch(self): + b = query.BatchQuery() + m = self.model.timeout(None) + with self.assertRaises(AssertionError): + m.batch(b) + + def test_batch_then_timeout(self): + 
b = query.BatchQuery() + m = self.model.batch(b) + with self.assertRaises(AssertionError): + m.timeout(0.5) diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index 2466decee6..bd26bc21df 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -592,6 +592,53 @@ QuerySet method reference Row.objects(row_id=5).update(map_column__update={1: 2, 3: 4}) +Per Query Timeouts +=================== + +By default all queries are executed with the timeout defined in `~cqlengine.connection.setup()` +The examples below show how to specify a per-query timeout. +A timeout is specified in seconds and can be an int, float or None. +None means no timeout. + + + .. code-block:: python + + class Row(Model): + id = columns.Integer(primary_key=True) + name = columns.Text() + + + Fetch all objects with a timeout of 5 seconds + .. code-block:: python + + Row.objects().timeout(5).all() + + Save a single row with a 50ms timeout + .. code-block:: python + + Row(id=1, name='Jon').timeout(0.05).save() + + Delete a single row with no timeout + .. code-block:: python + + Row(id=1).timeout(None).delete() + + Update a single row with no timeout + .. code-block:: python + + Row(id=1).timeout(None).update(name='Blake') + + Batch query timeouts + .. code-block:: python + + with BatchQuery(timeout=10) as b: + Row(id=1, name='Jon').save() + + + NOTE: You cannot set both timeout and batch at the same time, batch will use the timeout defined in it's constructor. + Setting the timeout on the model is meaningless and will raise an AssertionError. 
+ + Named Tables =================== From 29b10562f6de9aeca3a0fc6f74d0aaf618de9672 Mon Sep 17 00:00:00 2001 From: Roey Berman Date: Mon, 27 Oct 2014 11:42:38 +0200 Subject: [PATCH 1039/3726] docs for timeout - use create() instead of save() --- docs/topics/queryset.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index bd26bc21df..650f7d291c 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -613,10 +613,10 @@ None means no timeout. Row.objects().timeout(5).all() - Save a single row with a 50ms timeout + Create a single row with a 50ms timeout .. code-block:: python - Row(id=1, name='Jon').timeout(0.05).save() + Row(id=1, name='Jon').timeout(0.05).create() Delete a single row with no timeout .. code-block:: python @@ -632,7 +632,7 @@ None means no timeout. .. code-block:: python with BatchQuery(timeout=10) as b: - Row(id=1, name='Jon').save() + Row(id=1, name='Jon').create() NOTE: You cannot set both timeout and batch at the same time, batch will use the timeout defined in it's constructor. From b3f64e16b0bfd79e9300b06002309d8aa4e20116 Mon Sep 17 00:00:00 2001 From: Roey Berman Date: Thu, 20 Nov 2014 18:51:33 +0200 Subject: [PATCH 1040/3726] fix docs for per query timeout --- docs/topics/queryset.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/topics/queryset.rst b/docs/topics/queryset.rst index 650f7d291c..79c07e428e 100644 --- a/docs/topics/queryset.rst +++ b/docs/topics/queryset.rst @@ -609,26 +609,31 @@ None means no timeout. Fetch all objects with a timeout of 5 seconds + .. code-block:: python Row.objects().timeout(5).all() Create a single row with a 50ms timeout + .. code-block:: python Row(id=1, name='Jon').timeout(0.05).create() Delete a single row with no timeout + .. code-block:: python Row(id=1).timeout(None).delete() Update a single row with no timeout + .. 
code-block:: python Row(id=1).timeout(None).update(name='Blake') Batch query timeouts + .. code-block:: python with BatchQuery(timeout=10) as b: From 542e7f6573470aa8d6ec262583d06d162cf4c5f8 Mon Sep 17 00:00:00 2001 From: Anthony Yim Date: Fri, 21 Nov 2014 17:18:29 -0500 Subject: [PATCH 1041/3726] Fixes typo in docstring. --- cqlengine/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/models.py b/cqlengine/models.py index d88836b1fc..d7f928dda9 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -837,7 +837,7 @@ def _get_polymorphic_base(bases): class Model(BaseModel): """ the db name for the column family can be set as the attribute db_name, or - it will be genertaed from the class name + it will be generated from the class name """ __abstract__ = True # __metaclass__ = ModelMetaClass From 623216ef1a33367831fd1cef69ac1dcb430f6cd0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 24 Nov 2014 10:56:11 -0600 Subject: [PATCH 1042/3726] Update installation.rst Add note to help with issues as reported here: http://stackoverflow.com/questions/27085384/c-extension-for-murmur3partitioner-was-not-compiled --- docs/installation.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/installation.rst b/docs/installation.rst index e0d939366d..14fb9b17ba 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -13,12 +13,14 @@ Installation through pip ------------------------ `pip `_ is the suggested tool for installing packages. It will handle installing all python dependencies for the driver at -the same time as the driver itself. To install the driver:: +the same time as the driver itself. To install the driver*:: pip install cassandra-driver You can use ``pip install --pre cassandra-driver`` if you need to install a beta version. +***Note**: if intending to use optional extensions, install the `dependencies <#optional-non-python-dependencies>`_ first. 
The driver may need to be reinstalled if dependencies are added after the initial installation. + OSX Installation Error ^^^^^^^^^^^^^^^^^^^^^^ If you're installing on OSX and have XCode 5.1 installed, you may see an error like this:: From 7488ec065ab6d66cb535259380e977da08d11a59 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 24 Nov 2014 12:34:55 -0600 Subject: [PATCH 1043/3726] generalized read loop in favor of ssl-only 'drain' tweaking gevent ssl PR WIP --- cassandra/io/geventreactor.py | 17 +++++------------ cassandra/io/libevwrapper.sox | Bin 0 -> 17896 bytes 2 files changed, 5 insertions(+), 12 deletions(-) create mode 100755 cassandra/io/libevwrapper.sox diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 07206c71d2..6be43ece17 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -79,9 +79,6 @@ def __init__(self, *args, **kwargs): self._socket = socket.socket(af, socktype, proto) if self.ssl_options: self._socket = ssl.wrap_socket(self._socket, **self.ssl_options) - self._ssl_socket = True - else: - self._ssl_socket = False self._socket.settimeout(1.0) self._socket.connect(sockaddr) sockerr = None @@ -155,15 +152,11 @@ def handle_read(self): return try: - buf = self._socket.recv(self.in_buffer_size) - self._iobuf.write(buf) - if self._ssl_socket: - # We need to drain pending data when dealing with a SSL socket - data_left = self._socket.pending() - while data_left: - buf = self._socket.recv(data_left) - self._iobuf.write(buf) - data_left = self._socket.pending() + while True: + buf = self._socket.recv(self.in_buffer_size) + self._iobuf.write(buf) + if len(buf) < self.in_buffer_size: + break except socket.error as err: if not is_timeout(err): log.debug("Exception during socket recv for %s: %s", self, err) diff --git a/cassandra/io/libevwrapper.sox b/cassandra/io/libevwrapper.sox new file mode 100755 index 0000000000000000000000000000000000000000..8fd23f01b425b802592e745a9ed2b9e929f66875 GIT 
binary patch literal 17896 zcmeHP4{%(?dEb*IBV!w#sZC6$z%#}?olZ~I#izT= z-IFcF5Y(|1d?N&+C4>?JBB8;wK+JTSdI}|)n!%pq+^pcE9iU?e~4V`|ZAc@7`Pa=kLGq(HTPc<_ICmkryBjl?l-YO6gqW z)g?lh=GKO7Tcv1X5ts-CWB3$sBj$3oY1&q|%>cyfFLIWudyzVUM7UJ3sw@?~ZfKfW zyQ?)@5PAJw_!(98IBO906|^x{_E+p5`7=ALMB2(g&+G35`@4ai5Jp&vX;S}USvwO? z?qm(GzjYK?qyy|kV<6XgZL@?f{9W4sH@e8o>YQ8QAPq;3Mrjj$w)@<>rm^>emHOe61vUB_Mx#Yys zyc)=++0?k{>W2D8y$1xT7AYW0As9PL6sB>yebF*sihS^5A)Wv|bcGPLP?`@uf=uZS z&SW=;yaIW;bog>1;G&q&{xv{!8BjY9{`Q+cdi=gGUpW-Ix##Ij$@f=5=0_H#X9_Xz zOu5h10+Oz}1Zf49iR!LwraF;|MHAJDcq?UVch$NoEXsH*6eijEe2*b~KDJX9%{^J$ z>a5yOw3=)u#3FQLqTS9|%*>ydTVr1~iRHY8=U&p-L>jr}mFLK+*A}DdCtUSKRdpt0u=>f_ zs5-<|OQ~w$@~iQvWeluu`7DtoYoJl{T=?)icrJHtgQk%?V)VUKvH#ZyAk;2|_TRt| z@_%{2<(^~0<&!WtW)@%rycjaP{btbdR-9%qd&K;!Qzi21T3 zLeTj2pSS+M{)tP9cug$%po%b@ZGLPh}So z$+I21+juoHX1@Up%<>K^o7FBNO|FfnYEpzcH}KZ{&_Tcfkt2 z&Lv-Y0d3ECy!tmP{V}|vm{K3>TgERqA$|y)j^E4!EBhb5uG!f4I=cEn(?D4-#%T;( zJ8BGUbU2*PIxYC|(l;D_T88lk#Bh|FZbt7UpQ}UTH#g%Z+XD?X@7CwW5nKQE@QqkH{bkQDq|j0PPhtT%yFb?!uUeeON# z!q_9=F~{h{<1GoU#FSh3Hk?>{=Dh*_56k=%x|bTzs7_LqgXNN!g0-S z0P}ma;W(}Hrhz2^L<7_8V49EP$~nw7oG#SU7If(}SKY$9$uM{IYFwCyd%@3dIG(G! 
zjF-g876kXF_#v+rLImr}`4zTEy%1K@G_G_ngmYlUKzcUC=SXF&5^;9|I;P=%Ed)3DNord!tK=u6y>-wW; zvSzp`_p7Gd`?(jLy{I_(na2KG4AlLQ>!_j?6;rQ7Qg+TDhqMxJ2;UescZ{6}JVpO7 z3QR_EJzhv&`^z4oyTvr?ae_X*pn!hQwED4I{5!@vXd{;E$%x&uc*oeb0_OF)*Y7+9 z*-Z9tY}vF;)Vw5@oPiDdBg}f=a2M^luh5<@ulbcUb04DSN!oaFC-koCouBKPv2)bC zoBwu@+Lw1*yY9!}g(D?>!&r)byR0$y^2sltg*}B9et=8SxfhCXVC3g_E#%(gJ_Jcq zZk*S|g!>QeFS!q#y)bK(+l#;EUFSuhrrdju35q5zq9*b}FR2OkM*l5-=Z8wKRO#(F zGMKLv;PaIwvLt!Fa;_}tDJEZerYsl8@+?{UWqGzN7t8V-SuU03XJxremYr1*Kn-=+96;twd^NBm8S|1Gx9v9#jlj7eXKBV|D;@2tuRpM7G{uSbvftN1@_hh1J`c%+xow=nUTu=1+Esa|nh-uie zW%Cx%luD&V<7N@bSm|iS65(u5GA3ftY&M!~%S5ZgcYz;ft{8eXpe($v$C;FJZ+~kf%Z(QGmuTic3HN_+R=I`WYDOXZ;;o&&Lx^VM=3+PE=*Z^QIC9aMZX%vFqcJo|QO3p16h0Zi&GMEZXj}4E3q_rnt?N;%X3!%!ymz%h4nyc3Y z?C35l8%U-C(ag@SPRv8r6)M-nbR{!ZySSFhjFpUG{xPAhdfGtgGY#(EVl^@)qPSg9 z^7-)p3Q^*tZ)qV2DC_TH#FO0uu@G`;Q5t{^-91s^i_8%vz1Vz5ONB33CQ9&RJ;#p= zVDJp#^OuW~VO(sB=9f|zWRg$o;Cx@-B7EI`pMSv_<%6hQ1sV0R{4Kgg8&Rrr783h5Z}Xkw@am|_3om&x0OFUva6*1F{J)Ih3v3D z!ZJ;v_e)fa&p>Yh#(0YLHnF>@#<3210q!6Dyp&0|d)2 z)Z0(7#r{O;bmMpwdgU|FI}SZR>z!^KA40E|^=haOlsx00-)*a~wj#JkP@*11^*givQ=cmv+JOjPAp*O^Om26)a zw`*N5!Y=CmXLIUdJNPg8d-yoh51E$BAL>FZW*T665z|_x^-SrV2&EgCb}+S>_AFZ2SF)ihQ_?a$ay1Ec3)h~I64)D!BfE!&$s?Gg?rB_y;`os)0WfC{~Wdtc`PHm&OG8z9P0V6 zoZ+~<9SW`VN9Fdbrg zkm(_&hnWsDJ;ro|=?SKzOdY1;^ZEGhHa7xFdLyiNnDv5ajO5)+J3z^8nBrGE5+l0N>E za)Q!h#S4b!ga&>7U@$V` zYe6|&GUN*e{r+Git=>?#VUBgW1dJOu0JqQaiTQu@i)PY6A4L_4G{5M31bCGTKMA}a zStrt8g*-$I5)nFc0*pQ+xe> z;ykw3SldTD?Yl9xuYc!X#P%9%`w@@58d;>hK zAC1AP>xYby2$TH~0VKj?uiu;WgC~29sh-xaJ6>e(#$>PGt9%PQ*=r1TM)vx>%l&Mx zv9|Za4<*9Xz8h2f`n?Q&L!tIH20NqcZwl;U}S>h*rVn6ci!S1{K5@mj`uf4zdS-Y=UN>;3OW#(F>NWUTk6y^QsKb1P%L ze+)9#`@we^xA1y@n6bWI4{3d_f10tro_@wyUmq_q*6aN>#(F)EYkOXgA8LL5U8)rE zbFH7njP?3i$yl$SKVYoa&nB(EQq7O4^%?hQeO@npjP?4tm$6sNU27d^P%gSUHd%7gnnn4d=r{rO8z{WBi?ya&^B zrF%RddhinU{8{k7#)IoUc)JHDJ@_UMzQcnbP#9O8N(U827ghRe4?gO_$31w|gWvSv zk35*pR-my$dn-J6jRy~+Tl5Y6kCFcbna(?e$n_Sx&#UEd0stad`s^h6@G2xt5Y=$mcyW)wqYWyW< 
zx~el8OJ%#)):t?jP8#JptP+Ka0$o!H2PHc~D-v94_s+gb+LN|)n5coOU3(JoEF!4~IJGU;Dr<(nvnQ3oNfcCWNa5cixDsbTn^HK-2tgemz%=VRtk^D+;BcbZY}w7~U!-8c^_jSB z;dEg%o|VTnNxVCnFdK2nZ0*QcQRQCBwpdx5G_{midL+%}=x(ow4arA5nuvehYLnXC zF|(mNW~J?TDyao@iO5rvB)F=vso9R&mf4WZcHywBTeQVWSkbJMZB+X}xUH=*Y1gHa z*ea5AVfPd(Hp`>4BvTwNq$_b?w!Uy))>TE3x)K+i?iO{)yvj;inRsjy?MIOwDcXWy z^bnH4QEjcmT>$?q0%V&~NeiLvilHa?cQxoBs4%VFym70&xd`O}U8=@j$~&C~w1a}j m4nX@P{{4^?8r1@MH2|b}8!y@xCF~&6g&;#!ZJxMO3-R9oZ9(q< literal 0 HcmV?d00001 From 34cc31e02395965260a330c265bef3bb65cbc5d3 Mon Sep 17 00:00:00 2001 From: Mike Adamson Date: Mon, 24 Nov 2014 18:17:17 +0000 Subject: [PATCH 1044/3726] Added SaslAuthProvider and SaslAuthenticator for kerberos support --- cassandra/auth.py | 43 +++++++++++++++++++++++++++++++++++++++++ cassandra/connection.py | 2 +- cassandra/protocol.py | 2 +- 3 files changed, 45 insertions(+), 2 deletions(-) diff --git a/cassandra/auth.py b/cassandra/auth.py index 477d1b4dce..f14184f951 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -123,3 +123,46 @@ def initial_response(self): def evaluate_challenge(self, challenge): return None + + +class SaslAuthProvider(AuthProvider): + """ + An :class:`~.AuthProvider` that works with DSE's KerberosAuthenticator. + + Example usage:: + + from cassandra.cluster import Cluster + from cassandra.auth import PlainTextAuthProvider + + sasl_kwargs = {'host': 'localhost', + 'service': 'dse', + 'mechanism': 'GSSAPI', + 'qops': 'auth'.split(',')} + auth_provider = SaslAuthProvider(**sasl_kwargs) + cluster = Cluster(auth_provider=auth_provider) + + .. versionadded:: 2.1.4 + """ + + def __init__(self, **sasl_kwargs): + self.sasl_kwargs = sasl_kwargs + + def new_authenticator(self, host): + return SaslAuthenticator(**self.sasl_kwargs) + +class SaslAuthenticator(Authenticator): + """ + An :class:`~.Authenticator` that works with DSE's KerberosAuthenticator. + + .. 
versionadded:: 2.1.4 + """ + + def __init__(self, host, service, mechanism='GSSAPI', **sasl_kwargs): + from puresasl.client import SASLClient + self.sasl = SASLClient(host, service, mechanism, **sasl_kwargs) + + def initial_response(self): + return self.sasl.process() + + def evaluate_challenge(self, challenge): + return self.sasl.process(challenge) diff --git a/cassandra/connection.py b/cassandra/connection.py index 0087d50b4a..cf093eb721 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -557,7 +557,7 @@ def _handle_startup_response(self, startup_response, did_authenticate=False): else: log.debug("Sending SASL-based auth response on %s", self) initial_response = self.authenticator.initial_response() - initial_response = "" if initial_response is None else initial_response.encode('utf-8') + initial_response = "" if initial_response is None else initial_response self.send_msg(AuthResponseMessage(initial_response), 0, self._handle_auth_response) elif isinstance(startup_response, ErrorMessage): log.debug("Received ErrorMessage on new connection (%s) from %s: %s", diff --git a/cassandra/protocol.py b/cassandra/protocol.py index e995dee76e..2862055381 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -384,7 +384,7 @@ def __init__(self, challenge): @classmethod def recv_body(cls, f, protocol_version, user_type_map): - return cls(read_longstring(f)) + return cls(read_binary_longstring(f)) class AuthResponseMessage(_MessageType): From 5bade68a6513bb0cd47912456f03bc007927de75 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 24 Nov 2014 13:37:48 -0600 Subject: [PATCH 1045/3726] Info log when connections established to 'up' nodes PYTHON-116 --- cassandra/cluster.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 4e099b1d72..7833dc54d5 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -773,6 +773,7 @@ def _on_up_future_completed(self, host, futures, results, lock, 
finished_future) self._cleanup_failed_on_up_handling(host) return + log.info("Connection pools established for node %s", host) # mark the host as up and notify all listeners host.set_up() for listener in self.listeners: From 07a4a7c403ce01e7290821af7e2b4439f04f5580 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 24 Nov 2014 16:49:12 -0600 Subject: [PATCH 1046/3726] Don't try to connect to peers with incomplete meta PYTHON-163 --- cassandra/cluster.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 4e099b1d72..7d02c3f6dc 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2060,19 +2060,25 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, if not addr or addr in ["0.0.0.0", "::"]: addr = row.get("peer") - found_hosts.add(addr) + tokens = row.get("tokens") + peer_meta_complete = bool(tokens) host = self._cluster.metadata.get_host(addr) datacenter = row.get("data_center") rack = row.get("rack") if host is None: log.debug("[control connection] Found new host to connect to: %s", addr) - host = self._cluster.add_host(addr, datacenter, rack, signal=True) - should_rebuild_token_map = True + host = self._cluster.add_host(addr, datacenter, rack, signal=peer_meta_complete) + should_rebuild_token_map |= peer_meta_complete else: - should_rebuild_token_map |= self._update_location_info(host, datacenter, rack) + should_rebuild_token_map |= self._update_location_info(host, datacenter, rack) and peer_meta_complete + + if not peer_meta_complete: + log.warn("Excluding host (%s) with no tokens in system.peers table of %s." 
% (addr, connection.host)) + continue + + found_hosts.add(addr) - tokens = row.get("tokens") if partitioner and tokens: token_map[host] = tokens From f37bc2c5fd1c60e52550227db20e0fafa92e041e Mon Sep 17 00:00:00 2001 From: Mike Adamson Date: Wed, 26 Nov 2014 11:14:28 +0000 Subject: [PATCH 1047/3726] Import error handling for SaslAuthProvider --- cassandra/auth.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cassandra/auth.py b/cassandra/auth.py index f14184f951..0d50851408 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -10,7 +10,10 @@ # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and - +try: + from puresasl.client import SASLClient +except ImportError: + SASLClient = None class AuthProvider(object): """ @@ -146,6 +149,8 @@ class SaslAuthProvider(AuthProvider): def __init__(self, **sasl_kwargs): self.sasl_kwargs = sasl_kwargs + if SASLClient is None: + raise ImportError('The puresasl library has not been installed') def new_authenticator(self, host): return SaslAuthenticator(**self.sasl_kwargs) @@ -158,7 +163,6 @@ class SaslAuthenticator(Authenticator): """ def __init__(self, host, service, mechanism='GSSAPI', **sasl_kwargs): - from puresasl.client import SASLClient self.sasl = SASLClient(host, service, mechanism, **sasl_kwargs) def initial_response(self): From d600e5144c0fb33a1c7cb3b2ed1a8943a953a639 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 26 Nov 2014 18:34:23 +0000 Subject: [PATCH 1048/3726] Remove ./debian directory Based on recommendation of Debian OpenStack maintainer, in accordance with https://wiki.debian.org/UpstreamGuide#Pristine_Upstream_Source --- debian/changelog | 41 ----------------- debian/compat | 1 - debian/control | 46 -------------------- debian/copyright | 28 ------------ debian/patches/0001-don-t-use-ez_setup.patch | 38 
---------------- debian/patches/series | 1 - debian/python-cassandra-driver-dbg.install | 2 - debian/python-cassandra-driver-doc.docs | 1 - debian/python-cassandra-driver.install | 4 -- debian/rules | 16 ------- debian/source/format | 1 - 11 files changed, 179 deletions(-) delete mode 100644 debian/changelog delete mode 100644 debian/compat delete mode 100644 debian/control delete mode 100644 debian/copyright delete mode 100644 debian/patches/0001-don-t-use-ez_setup.patch delete mode 100644 debian/patches/series delete mode 100644 debian/python-cassandra-driver-dbg.install delete mode 100644 debian/python-cassandra-driver-doc.docs delete mode 100644 debian/python-cassandra-driver.install delete mode 100755 debian/rules delete mode 100644 debian/source/format diff --git a/debian/changelog b/debian/changelog deleted file mode 100644 index eb73ff54e0..0000000000 --- a/debian/changelog +++ /dev/null @@ -1,41 +0,0 @@ -python-cassandra-driver (2.1.2) unstable; urgency=low - - * Release 2.1.2 - - -- Adam Holmberg Thu, 16 Oct 2014 18:30:22 +0000 - -python-cassandra-driver (2.1.1) unstable; urgency=low - - * Release 2.1.1 - - -- Adam Holmberg Tue, 09 Sep 2014 17:14:41 +0000 - -python-cassandra-driver (2.1.0) unstable; urgency=low - - * Release 2.1.0 - - -- Adam Holmberg Thu, 07 Aug 2014 19:01:17 +0000 - -python-cassandra-driver (2.0.2-2) unstable; urgency=low - - * Fixed debian/patches - - -- Carsten Aulbert Thu, 19 Jun 2014 14:07:27 +0200 - -python-cassandra-driver (2.0.2-1) unstable; urgency=low - - * Release 2.0.2 - - -- Tyler Hobbs Tue, 10 Jun 2014 16:22:23 -0500 - -python-cassandra-driver (2.0.2~prerelease-1) unstable; urgency=low - - * Update dependencies for 2.0.x - - -- Tyler Hobbs Tue, 03 Jun 2014 15:16:53 -0500 - -python-cassandra-driver (1.1.0~prerelease-1) unstable; urgency=low - - * Initial packaging - - -- paul cannon Thu, 03 Apr 2014 10:30:11 -0600 diff --git a/debian/compat b/debian/compat deleted file mode 100644 index ec635144f6..0000000000 --- 
a/debian/compat +++ /dev/null @@ -1 +0,0 @@ -9 diff --git a/debian/control b/debian/control deleted file mode 100644 index 6a698a8fb8..0000000000 --- a/debian/control +++ /dev/null @@ -1,46 +0,0 @@ -Source: python-cassandra-driver -Maintainer: paul cannon -Section: python -Priority: optional -Build-Depends: python-all-dev (>= 2.6.6-3), python-all-dbg, debhelper (>= 9), - python-sphinx (>= 1.0.7+dfsg) | python3-sphinx, libev-dev, - python-concurrent.futures | python-futures, python-setuptools, - python-nose, python-mock, python-yaml, python-gevent, - python-blist, python-tz, python-six (>= 1.6) -X-Python-Version: >= 2.7 -Standards-Version: 3.9.4 - -Package: python-cassandra-driver -Architecture: any -Depends: ${misc:Depends}, ${python:Depends}, ${shlibs:Depends}, - python-concurrent.futures | python-futures, python-six (>= 1.6) -Provides: ${python:Provides} -Recommends: python-scales, python-blist -Suggests: python-cassandra-driver-doc -Description: Python driver for Apache Cassandra - This driver works exclusively with the Cassandra Query Language v3 (CQL3) - and Cassandra's native protocol. As such, only Cassandra 1.2+ is supported. - -Package: python-cassandra-driver-dbg -Architecture: any -Depends: ${misc:Depends}, ${python:Depends}, ${shlibs:Depends}, - python-cassandra-driver (= ${binary:Version}) -Provides: ${python:Provides} -Section: debug -Priority: extra -Description: Python driver for Apache Cassandra (debug build and symbols) - This driver works exclusively with the Cassandra Query Language v3 (CQL3) - and Cassandra's native protocol. As such, only Cassandra 1.2+ is supported. - . - This package contains debug builds of the extensions and debug symbols for - the extensions in the main package. 
- -Package: python-cassandra-driver-doc -Architecture: all -Section: doc -Priority: extra -Depends: ${misc:Depends}, ${sphinxdoc:Depends} -Suggests: python-cassandra-driver -Description: Python driver for Apache Cassandra (documentation) - This contains HTML documentation for the use of the Python Cassandra - driver. diff --git a/debian/copyright b/debian/copyright deleted file mode 100644 index f138547ae4..0000000000 --- a/debian/copyright +++ /dev/null @@ -1,28 +0,0 @@ -Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: python-driver -Upstream-Contact: Tyler Hobbs -Source: https://github.com/datastax/python-driver - -Files: * -Copyright: Copyright 2013, DataStax -License: Apache-2.0 - -Files: debian/* -Copyright: Copyright (c) 2014 by Space Monkey, Inc. -License: Apache-2.0 - -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the full text of the Apache License version 2.0 - can be found in the file `/usr/share/common-licenses/Apache-2.0'. 
diff --git a/debian/patches/0001-don-t-use-ez_setup.patch b/debian/patches/0001-don-t-use-ez_setup.patch deleted file mode 100644 index 32996d607e..0000000000 --- a/debian/patches/0001-don-t-use-ez_setup.patch +++ /dev/null @@ -1,38 +0,0 @@ -From: paul cannon -Date: Thu, 3 Apr 2014 11:27:09 -0600 -Subject: don't use ez_setup - -Debian packages aren't supposed to download stuff while building, and -since the version of setuptools in stable is less than the one ez_setup -wants, and since some system python packages don't ship their .egg-info -directories, it might try. - -It's ok though, we can rely on the Depends and Build-Depends for making -sure python-setuptools and the various other deps are around at the right -times. ---- -diff --git a/setup.py b/setup.py -index 11cfd4c..4f05d1f 100644 ---- a/setup.py -+++ b/setup.py -@@ -20,9 +20,6 @@ if __name__ == '__main__' and sys.argv[1] == "gevent_nosetests": - from gevent.monkey import patch_all - patch_all() - --import ez_setup --ez_setup.use_setuptools() -- - from setuptools import setup - from distutils.command.build_ext import build_ext - from distutils.core import Extension -@@ -194,8 +191,8 @@ def run_setup(extensions): - author_email='tyler@datastax.com', - packages=['cassandra', 'cassandra.io'], - include_package_data=True, -- install_requires=dependencies, -- tests_require=['nose', 'mock', 'PyYAML', 'pytz'], -+ install_requires=(), -+ tests_require=(), - classifiers=[ - 'Development Status :: 5 - Production/Stable', - 'Intended Audience :: Developers', diff --git a/debian/patches/series b/debian/patches/series deleted file mode 100644 index 25373f1872..0000000000 --- a/debian/patches/series +++ /dev/null @@ -1 +0,0 @@ -0001-don-t-use-ez_setup.patch diff --git a/debian/python-cassandra-driver-dbg.install b/debian/python-cassandra-driver-dbg.install deleted file mode 100644 index a75d4c7860..0000000000 --- a/debian/python-cassandra-driver-dbg.install +++ /dev/null @@ -1,2 +0,0 @@ 
-usr/lib/python2*/*-packages/cassandra/*_d.so -usr/lib/python2*/*-packages/cassandra/io/*_d.so diff --git a/debian/python-cassandra-driver-doc.docs b/debian/python-cassandra-driver-doc.docs deleted file mode 100644 index 300da92101..0000000000 --- a/debian/python-cassandra-driver-doc.docs +++ /dev/null @@ -1 +0,0 @@ -docs/_build/*/* diff --git a/debian/python-cassandra-driver.install b/debian/python-cassandra-driver.install deleted file mode 100644 index cf5c1ebe5f..0000000000 --- a/debian/python-cassandra-driver.install +++ /dev/null @@ -1,4 +0,0 @@ -usr/lib/python2*/*-packages/cassandra/*[!_][!_].so -usr/lib/python2*/*-packages/cassandra/*.py -usr/lib/python2*/*-packages/cassandra/io/*[!_][!_].so -usr/lib/python2*/*-packages/cassandra/io/*.py diff --git a/debian/rules b/debian/rules deleted file mode 100755 index 1943cfc14f..0000000000 --- a/debian/rules +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/make -f - -%: - dh $@ --with python2,sphinxdoc - -override_dh_auto_build: - dh_auto_build - python setup.py doc - -ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS))) -override_dh_auto_test: - python setup.py gevent_nosetests -endif - -override_dh_strip: - dh_strip --dbg-package=python-cassandra-driver-dbg diff --git a/debian/source/format b/debian/source/format deleted file mode 100644 index 163aaf8d82..0000000000 --- a/debian/source/format +++ /dev/null @@ -1 +0,0 @@ -3.0 (quilt) From cee2f673c0594aa631f4fe0ef08c23ee146c3fb9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 26 Nov 2014 18:38:46 +0000 Subject: [PATCH 1049/3726] Remove debian references from release procedure. 
--- README-dev.rst | 7 ------- 1 file changed, 7 deletions(-) diff --git a/README-dev.rst b/README-dev.rst index 75d8f2466c..3d1f9b19f8 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -1,13 +1,6 @@ Releasing ========= * Run the tests and ensure they all pass -* If dependencies have changed, make sure ``debian/control`` - is up to date -* Make sure all patches in ``debian/patches`` still apply cleanly -* Update the debian changelog with the new version:: - - dch -v '1.0.0' - * Update CHANGELOG.rst * Update the version in ``cassandra/__init__.py`` From ca4d5b0303f21e108d34000b9e7b0cbf99b343ed Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 26 Nov 2014 15:26:05 -0600 Subject: [PATCH 1050/3726] Copy keyspace meta tables on refresh PYTHON-173 --- cassandra/metadata.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index ec9062a712..f0befd95e3 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -156,6 +156,7 @@ def keyspace_changed(self, keyspace, ks_results): old_keyspace_meta = self.keyspaces.get(keyspace, None) self.keyspaces[keyspace] = keyspace_meta if old_keyspace_meta: + keyspace_meta.tables = old_keyspace_meta.tables keyspace_meta.user_types = old_keyspace_meta.user_types if (keyspace_meta.replication_strategy != old_keyspace_meta.replication_strategy): self._keyspace_updated(keyspace) From cdfa7a7a241ed080e9218d1cfc98e2b8a2fa3765 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 26 Nov 2014 16:05:04 -0600 Subject: [PATCH 1051/3726] Select and use peer address for schema agreement PYTHON-166 Fixes a problem with schema agreement which manifests when rpc_address = 0.0.0.0 --- cassandra/cluster.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 4e099b1d72..bc64c139dc 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1713,7 +1713,7 @@ class ControlConnection(object): _SELECT_PEERS = 
"SELECT peer, data_center, rack, tokens, rpc_address, schema_version FROM system.peers" _SELECT_LOCAL = "SELECT cluster_name, data_center, rack, tokens, partitioner, schema_version FROM system.local WHERE key='local'" - _SELECT_SCHEMA_PEERS = "SELECT rpc_address, schema_version FROM system.peers" + _SELECT_SCHEMA_PEERS = "SELECT peer, rpc_address, schema_version FROM system.peers" _SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'" _is_shutdown = False @@ -2205,16 +2205,17 @@ def _get_schema_mismatches(self, peers_result, local_result, local_address): versions[local_row.get("schema_version")].add(local_address) for row in peers_result: - if not row.get("rpc_address") or not row.get("schema_version"): + schema_ver = row.get('schema_version') + if not schema_ver: continue - rpc = row.get("rpc_address") - if rpc == "0.0.0.0": # TODO ipv6 check - rpc = row.get("peer") + addr = row.get("rpc_address") + if not addr or addr in ["0.0.0.0", "::"]: + addr = row.get("peer") - peer = self._cluster.metadata.get_host(rpc) + peer = self._cluster.metadata.get_host(addr) if peer and peer.is_up: - versions[row.get("schema_version")].add(rpc) + versions[row.get("schema_version")].add(addr) if len(versions) == 1: log.debug("[control connection] Schemas match") From 382d2229c85379c2ef8d6a1a75adc210a858c88e Mon Sep 17 00:00:00 2001 From: Tyler Hobbs Date: Wed, 26 Nov 2014 16:25:24 -0600 Subject: [PATCH 1052/3726] Specify "compression = {}" when compression is disabled This was brought up in CASSANDRA-8288 --- cassandra/metadata.py | 5 ++--- tests/integration/standard/test_metadata.py | 7 +++++++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index ec9062a712..ca355b033a 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -937,9 +937,8 @@ def _make_option_strings(self): if not options_copy.get('compression'): params = json.loads(options_copy.pop('compression_parameters', '{}')) - if 
params: - param_strings = ["'%s': '%s'" % (k, v) for k, v in params.items()] - ret.append('compression = {%s}' % ', '.join(param_strings)) + param_strings = ["'%s': '%s'" % (k, v) for k, v in params.items()] + ret.append('compression = {%s}' % ', '.join(param_strings)) for name, value in options_copy.items(): if value is not None: diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 635490b744..0899e859a9 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -301,6 +301,13 @@ def test_indexes(self): self.assertIn('CREATE INDEX d_index', statement) self.assertIn('CREATE INDEX e_index', statement) + def test_compression_disabled(self): + create_statement = self.make_create_statement(["a"], ["b"], ["c"]) + create_statement += " WITH compression = {}" + self.session.execute(create_statement) + tablemeta = self.get_table_metadata() + self.assertIn("compression = {}", tablemeta.export_as_string()) + class TestCodeCoverage(unittest.TestCase): From f32831fdf0e88520300777a5cafe2a646bfb30c7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 1 Dec 2014 16:23:22 +0000 Subject: [PATCH 1053/3726] Treat control_connection_timeout like other cluster attributes. --- cassandra/cluster.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 0ce32d5838..d6189a9f46 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -366,10 +366,9 @@ def auth_provider(self, value): control_connection_timeout = 2.0 """ - A default timeout, in seconds, for queries made by the control connection, - such as querying the current schema and information about nodes in the - cluster. If set to :const:`None`, there will be no timeout for these - queries. 
Can be overridden by passing it to __init__() + A timeout, in seconds, for queries made by the control connection, such + as querying the current schema and information about nodes in the cluster. + If set to :const:`None`, there will be no timeout for these queries. """ sessions = None @@ -406,7 +405,7 @@ def __init__(self, protocol_version=2, executor_threads=2, max_schema_agreement_wait=10, - control_connection_timeout=control_connection_timeout): + control_connection_timeout=2.0): """ Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. From 1d924f1e44de2dbff28bc843a0e7b9ee94244bb6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 1 Dec 2014 16:24:34 +0000 Subject: [PATCH 1054/3726] Don't sleep following a timeout in schema wait. --- cassandra/cluster.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d6189a9f46..57df88bc06 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2162,7 +2162,6 @@ def wait_for_schema_agreement(self, connection=None, preloaded_results=None): log.debug("[control connection] Waiting for schema agreement") start = self._time.time() elapsed = 0 - poll_interval = 0.2 cl = ConsistencyLevel.ONE total_timeout = self._cluster.max_schema_agreement_wait schema_mismatches = None @@ -2176,7 +2175,6 @@ def wait_for_schema_agreement(self, connection=None, preloaded_results=None): except OperationTimedOut as timeout: log.debug("[control connection] Timed out waiting for " "response during schema agreement check: %s", timeout) - self._time.sleep(poll_interval) elapsed = self._time.time() - start continue except ConnectionShutdown: @@ -2191,7 +2189,7 @@ def wait_for_schema_agreement(self, connection=None, preloaded_results=None): return True log.debug("[control connection] Schemas mismatched, trying again") - self._time.sleep(poll_interval) + self._time.sleep(0.2) elapsed = self._time.time() - start log.warn("Node %s is 
reporting a schema disagreement: %s", From 18c31d673f7ee132ef4cc413d8fbdedb2e654b18 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 1 Dec 2014 18:52:56 +0000 Subject: [PATCH 1055/3726] Make test_refresh_schema_timeout test work previously a failed assertion in bad_wait_for_responses was causing the test to take an unexpected path, failing silently --- tests/unit/test_control_connection.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index 3ca2e8160d..12d09781e5 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -59,7 +59,7 @@ def rebuild_token_map(self, partitioner, token_map): class MockCluster(object): - max_schema_agreement_wait = Cluster.max_schema_agreement_wait + max_schema_agreement_wait = 5 load_balancing_policy = RoundRobinPolicy() reconnection_policy = ConstantReconnectionPolicy(2) down_host = None @@ -131,7 +131,7 @@ def setUp(self): self.connection = MockConnection() self.time = FakeTime() - self.control_connection = ControlConnection(self.cluster, timeout=0.01) + self.control_connection = ControlConnection(self.cluster, timeout=1) self.control_connection._connection = self.connection self.control_connection._time = self.time @@ -206,7 +206,7 @@ def test_wait_for_schema_agreement_fails(self): self.connection.peer_results[1][1][2] = 'b' self.assertFalse(self.control_connection.wait_for_schema_agreement()) # the control connection should have slept until it hit the limit - self.assertGreaterEqual(self.time.clock, Cluster.max_schema_agreement_wait) + self.assertGreaterEqual(self.time.clock, self.cluster.max_schema_agreement_wait) def test_wait_for_schema_agreement_skipping(self): """ @@ -247,7 +247,7 @@ def test_wait_for_schema_agreement_rpc_lookup(self): # but once we mark it up, the control connection will care host.is_up = True 
self.assertFalse(self.control_connection.wait_for_schema_agreement()) - self.assertGreaterEqual(self.time.clock, Cluster.max_schema_agreement_wait) + self.assertGreaterEqual(self.time.clock, self.cluster.max_schema_agreement_wait) def test_refresh_nodes_and_tokens(self): self.control_connection.refresh_node_list_and_token_map() @@ -332,12 +332,13 @@ def bad_wait_for_responses(*args, **kwargs): def test_refresh_schema_timeout(self): def bad_wait_for_responses(*args, **kwargs): - self.assertEqual(kwargs['timeout'], self.control_connection._timeout) + self.time.sleep(kwargs['timeout']) raise OperationTimedOut() - self.connection.wait_for_responses = bad_wait_for_responses + self.connection.wait_for_responses = Mock(side_effect=bad_wait_for_responses) self.control_connection.refresh_schema() - self.cluster.executor.submit.assert_called_with(self.control_connection._reconnect) + self.assertEqual(self.connection.wait_for_responses.call_count, self.cluster.max_schema_agreement_wait/self.control_connection._timeout) + self.assertEqual(self.connection.wait_for_responses.call_args[1]['timeout'], self.control_connection._timeout) def test_handle_topology_change(self): event = { From 7a7e1172d7437857dec90eed6ae7083045a619e4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 1 Dec 2014 22:45:46 +0000 Subject: [PATCH 1056/3726] Use session default_timeout waiting on prepare futures PYTHON-179; fixes issue where prepare phase can hang indefinitely if peers are unreachable. 
--- cassandra/cluster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 7833dc54d5..318ff6e2ee 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1427,7 +1427,7 @@ def prepare(self, query): future = ResponseFuture(self, message, query=None) try: future.send_request() - query_id, column_metadata = future.result() + query_id, column_metadata = future.result(self.default_timeout) except Exception: log.exception("Error preparing query:") raise @@ -1473,7 +1473,7 @@ def prepare_on_all_hosts(self, query, excluded_host): for host, future in futures: try: - future.result() + future.result(self.default_timeout) except Exception: log.exception("Error preparing query for host %s:", host) From c875016c6ace6fdbd60dc7d1a02063ad8517d8e5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 2 Dec 2014 16:44:37 +0000 Subject: [PATCH 1057/3726] Avoid TokenAware failure when keyspace not set PYTHON-181; fixes KeyError when routing_key set with no keyspace --- cassandra/policies.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 94363db931..d4106a8db3 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -359,7 +359,7 @@ def make_query_plan(self, working_keyspace=None, query=None): yield host else: routing_key = query.routing_key - if routing_key is None: + if routing_key is None or keyspace is None: for host in child.make_query_plan(keyspace, query): yield host else: From b7d66a3951fc114ee48e44e56bfb0b33394a9f43 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 1 Dec 2014 23:10:53 +0000 Subject: [PATCH 1058/3726] Print CL name, instead of enum, in error messages PYTHON-180 --- cassandra/__init__.py | 20 +++++++++++++++----- cassandra/protocol.py | 2 -- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 5be259b91e..be8bad8494 100644 --- 
a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -122,6 +122,10 @@ class ConsistencyLevel(object): } +def consistency_value_to_name(value): + return ConsistencyLevel.value_to_name[value] if value is not None else "Not Set" + + class Unavailable(Exception): """ There were not enough live replicas to satisfy the requested consistency @@ -138,11 +142,14 @@ class Unavailable(Exception): alive_replicas = None """ The number of replicas that were actually alive """ - def __init__(self, message, consistency=None, required_replicas=None, alive_replicas=None): - Exception.__init__(self, message) - self.consistency = consistency + def __init__(self, summary_message, consistency=None, required_replicas=None, alive_replicas=None): + self.consistency_level = consistency self.required_replicas = required_replicas self.alive_replicas = alive_replicas + Exception.__init__(self, summary_message + ' info=' + + repr({'consistency': consistency_value_to_name(consistency), + 'required_replicas': required_replicas, + 'alive_replicas': alive_replicas})) class Timeout(Exception): @@ -162,11 +169,14 @@ class Timeout(Exception): the operation """ - def __init__(self, message, consistency=None, required_responses=None, received_responses=None): - Exception.__init__(self, message) + def __init__(self, summary_message, consistency=None, required_responses=None, received_responses=None): self.consistency = consistency self.required_responses = required_responses self.received_responses = received_responses + Exception.__init__(self, summary_message + ' info=' + + repr({'consistency': consistency_value_to_name(consistency), + 'required_responses': required_responses, + 'received_responses': received_responses})) class ReadTimeout(Timeout): diff --git a/cassandra/protocol.py b/cassandra/protocol.py index e995dee76e..291356ad08 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -149,8 +149,6 @@ def recv_body(cls, f, protocol_version, user_type_map): def summary_msg(self): msg 
= 'code=%04x [%s] message="%s"' \ % (self.code, self.summary, self.message) - if self.info is not None: - msg += (' info=' + repr(self.info)) return msg def __str__(self): From 37572a939f42df779d865f945dfb32038c6cc5a1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 2 Dec 2014 19:17:53 +0000 Subject: [PATCH 1059/3726] Make keyspace settable on all statement types Change Statement.keyspace to be handled more like routing_key: - now settable on simplestatements - now set implicitly along with routing key in batch statements Also: - changed BoundStatement.bind to raise if not enough parameters for routing key - fixed minor typo in example in comments --- cassandra/cluster.py | 2 +- cassandra/query.py | 81 ++++++++++++++---------- tests/integration/standard/test_query.py | 7 +- tests/unit/test_policies.py | 4 +- 4 files changed, 51 insertions(+), 43 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 7833dc54d5..2ec77922f5 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2597,7 +2597,7 @@ def _set_result(self, response): current_keyspace = self._connection.keyspace prepared_keyspace = prepared_statement.keyspace - if current_keyspace != prepared_keyspace: + if prepared_keyspace and current_keyspace != prepared_keyspace: self._set_final_exception( ValueError("The Session's current keyspace (%s) does " "not match the keyspace the statement was " diff --git a/cassandra/query.py b/cassandra/query.py index 791b00692d..22871f5d29 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -184,19 +184,33 @@ class Statement(object): .. versionadded:: 2.0.0 """ + keyspace = None + """ + The string name of the keyspace this query acts on. This is used when + :class:`~.TokenAwarePolicy` is configured for + :attr:`.Cluster.load_balancing_policy` + + It is set implicitly on :class:`.BoundStatement`, and :class:`.BatchStatement`, + but must be set explicitly on :class:`.SimpleStatement`. + + .. 
versionadded:: 2.1.3 + """ + _serial_consistency_level = None _routing_key = None def __init__(self, retry_policy=None, consistency_level=None, routing_key=None, - serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET): + serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET, keyspace=None): self.retry_policy = retry_policy if consistency_level is not None: self.consistency_level = consistency_level + self._routing_key = routing_key if serial_consistency_level is not None: self.serial_consistency_level = serial_consistency_level if fetch_size is not FETCH_SIZE_UNSET: self.fetch_size = fetch_size - self._routing_key = routing_key + if keyspace is not None: + self.keyspace = keyspace def _get_routing_key(self): return self._routing_key @@ -272,13 +286,6 @@ def _del_serial_consistency_level(self): .. versionadded:: 2.0.0 """) - @property - def keyspace(self): - """ - The string name of the keyspace this query acts on. - """ - return None - class SimpleStatement(Statement): """ @@ -319,14 +326,14 @@ class PreparedStatement(object): column_metadata = None query_id = None query_string = None - keyspace = None + keyspace = None # change to prepared_keyspace in major release routing_key_indexes = None consistency_level = None serial_consistency_level = None - _protocol_version = None + protocol_version = None fetch_size = FETCH_SIZE_UNSET @@ -338,16 +345,16 @@ def __init__(self, column_metadata, query_id, routing_key_indexes, query, keyspa self.routing_key_indexes = routing_key_indexes self.query_string = query self.keyspace = keyspace - self._protocol_version = protocol_version + self.protocol_version = protocol_version self.consistency_level = consistency_level self.serial_consistency_level = serial_consistency_level if fetch_size is not FETCH_SIZE_UNSET: self.fetch_size = fetch_size @classmethod - def from_message(cls, query_id, column_metadata, cluster_metadata, query, keyspace, protocol_version): + def from_message(cls, query_id, column_metadata, 
cluster_metadata, query, prepared_keyspace, protocol_version): if not column_metadata: - return PreparedStatement(column_metadata, query_id, None, query, keyspace, protocol_version) + return PreparedStatement(column_metadata, query_id, None, query, prepared_keyspace, protocol_version) partition_key_columns = None routing_key_indexes = None @@ -370,7 +377,7 @@ def from_message(cls, query_id, column_metadata, cluster_metadata, query, keyspa pass # statement; just leave routing_key_indexes as None return PreparedStatement(column_metadata, query_id, routing_key_indexes, - query, keyspace, protocol_version) + query, prepared_keyspace, protocol_version) def bind(self, values): """ @@ -400,7 +407,7 @@ class BoundStatement(Statement): The :class:`PreparedStatement` instance that this was created from. """ - values = None + values = [] """ The sequence of values that were bound to the prepared statement. """ @@ -410,11 +417,15 @@ def __init__(self, prepared_statement, *args, **kwargs): `prepared_statement` should be an instance of :class:`PreparedStatement`. All other ``*args`` and ``**kwargs`` will be passed to :class:`.Statement`. 
""" + self.prepared_statement = prepared_statement + self.consistency_level = prepared_statement.consistency_level self.serial_consistency_level = prepared_statement.serial_consistency_level self.fetch_size = prepared_statement.fetch_size - self.prepared_statement = prepared_statement - self.values = [] + + meta = prepared_statement.column_metadata + if meta: + self.keyspace = meta[0][0] Statement.__init__(self, *args, **kwargs) @@ -429,7 +440,7 @@ def bind(self, values): values = () col_meta = self.prepared_statement.column_metadata - proto_version = self.prepared_statement._protocol_version + proto_version = self.prepared_statement.protocol_version # special case for binding dicts if isinstance(values, dict): @@ -471,6 +482,12 @@ def bind(self, values): "Too many arguments provided to bind() (got %d, expected %d)" % (len(values), len(col_meta))) + if self.prepared_statement.routing_key_indexes and \ + len(values) < len(self.prepared_statement.routing_key_indexes): + raise ValueError( + "Too few arguments provided to bind() (got %d, required %d for routing key)" % + (len(values), len(self.prepared_statement.routing_key_indexes))) + self.raw_values = values self.values = [] for value, col_spec in zip(values, col_meta): @@ -514,14 +531,6 @@ def routing_key(self): return self._routing_key - @property - def keyspace(self): - meta = self.prepared_statement.column_metadata - if meta: - return meta[0][0] - else: - return None - def __str__(self): consistency = ConsistencyLevel.value_to_name.get(self.consistency_level, 'Not Set') return (u'' % @@ -620,8 +629,8 @@ def __init__(self, batch_type=BatchType.LOGGED, retry_policy=None, .. 
code-block:: python batch = BatchStatement() - batch.add(SimpleStatement("INSERT INTO users (name, age) VALUES (%s, %s)", (name, age)) - batch.add(SimpleStatement("DELETE FROM pending_users WHERE name=%s", (name,)) + batch.add(SimpleStatement("INSERT INTO users (name, age) VALUES (%s, %s)"), (name, age)) + batch.add(SimpleStatement("DELETE FROM pending_users WHERE name=%s"), (name,)) session.execute(batch) .. versionadded:: 2.0.0 @@ -651,7 +660,7 @@ def add(self, statement, parameters=None): elif isinstance(statement, PreparedStatement): query_id = statement.query_id bound_statement = statement.bind(() if parameters is None else parameters) - self._maybe_set_routing_key(bound_statement) + self._maybe_set_routing_attributes(bound_statement) self._statements_and_parameters.append( (True, query_id, bound_statement.values)) elif isinstance(statement, BoundStatement): @@ -659,7 +668,7 @@ def add(self, statement, parameters=None): raise ValueError( "Parameters cannot be passed with a BoundStatement " "to BatchStatement.add()") - self._maybe_set_routing_key(statement) + self._maybe_set_routing_attributes(statement) self._statements_and_parameters.append( (True, statement.prepared_statement.query_id, statement.values)) else: @@ -668,7 +677,7 @@ def add(self, statement, parameters=None): if parameters: encoder = Encoder() if self._session is None else self._session.encoder query_string = bind_params(query_string, parameters, encoder) - self._maybe_set_routing_key(statement) + self._maybe_set_routing_attributes(statement) self._statements_and_parameters.append((False, query_string, ())) return self @@ -681,9 +690,11 @@ def add_all(self, statements, parameters): for statement, value in zip(statements, parameters): self.add(statement, parameters) - def _maybe_set_routing_key(self, statement): - if self.routing_key is None and statement.routing_key is not None: - self.routing_key = statement.routing_key + def _maybe_set_routing_attributes(self, statement): + if self.routing_key 
is None: + if statement.keyspace and statement.routing_key: + self.routing_key = statement.routing_key + self.keyspace = statement.keyspace def __str__(self): consistency = ConsistencyLevel.value_to_name.get(self.consistency_level, 'Not Set') diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index a4b90a00b2..4190a469de 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -153,7 +153,7 @@ def test_multiple_routing_key_indexes(self): self.assertIsInstance(prepared, PreparedStatement) bound = prepared.bind((1, 2)) - self.assertEqual(bound.routing_key, b'\x04\x00\x00\x00\x04\x00\x00\x00') + self.assertEqual(bound.routing_key, b'\x00\x04\x00\x00\x00\x01\x00\x00\x04\x00\x00\x00\x02\x00') def test_bound_keyspace(self): """ @@ -172,9 +172,6 @@ def test_bound_keyspace(self): bound = prepared.bind((1, 2)) self.assertEqual(bound.keyspace, 'test3rf') - bound.prepared_statement.column_metadata = None - self.assertEqual(bound.keyspace, None) - class PrintStatementTests(unittest.TestCase): """ @@ -436,7 +433,7 @@ def setUp(self): query = """ INSERT INTO test3rf.test (k, v) VALUES (?, ?) 
""" - self.simple_statement = SimpleStatement(query, routing_key='ss_rk') + self.simple_statement = SimpleStatement(query, routing_key='ss_rk', keyspace='keyspace_name') self.prepared = self.session.prepare(query) def tearDown(self): diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 7fb99b8ceb..ab098f3926 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -381,7 +381,7 @@ def get_replicas(keyspace, packed_key): policy.populate(cluster, hosts) for i in range(4): - query = Statement(routing_key=struct.pack('>i', i)) + query = Statement(routing_key=struct.pack('>i', i), keyspace='keyspace_name') qplan = list(policy.make_query_plan(None, query)) replicas = get_replicas(None, struct.pack('>i', i)) @@ -420,7 +420,7 @@ def get_replicas(keyspace, packed_key): policy.populate(cluster, hosts) for i in range(4): - query = Statement(routing_key=struct.pack('>i', i)) + query = Statement(routing_key=struct.pack('>i', i), keyspace='keyspace_name') qplan = list(policy.make_query_plan(None, query)) replicas = get_replicas(None, struct.pack('>i', i)) From 8f0e72f484df077ad08012bd2011bddd313be655 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 3 Dec 2014 13:18:20 -0600 Subject: [PATCH 1060/3726] Assign key aliases from json, restore guard against empty string --- cassandra/metadata.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 4c6b7ec898..4d3897ab84 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -268,15 +268,15 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): table_meta.comparator = comparator # partition key - key_aliases = row.get("key_aliases") partition_rows = [row for row in col_rows.get(cfname) - if row.get('type', None) == "partition_key"] + if row.get('type', None) == "partition_key"] if len(partition_rows) > 1: partition_rows = sorted(partition_rows, key=lambda row: 
row.get('component_index')) + key_aliases = row.get("key_aliases") if key_aliases is not None: - json.loads(key_aliases) + key_aliases = json.loads(key_aliases) if key_aliases else [] else: # In 2.0+, we can use the 'type' column. In 3.0+, we have to use it. key_aliases = [row.get('column_name') for row in partition_rows] From 101e2653f3b355ed4801703109cc869c46486d01 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 3 Dec 2014 21:06:45 +0000 Subject: [PATCH 1061/3726] Lookup cf_col_rows once, with a default value fixes a problem trying to iterate 'None' when the cf has no rows in col_rows --- cassandra/metadata.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 4d3897ab84..d14f8c3588 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -214,6 +214,7 @@ def _build_usertype(self, keyspace, usertype_row): def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): cfname = row["columnfamily_name"] + cf_col_rows = col_rows.get(cfname, []) comparator = types.lookup_casstype(row["comparator"]) if issubclass(comparator, types.CompositeType): @@ -228,7 +229,7 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): column_aliases = row.get("column_aliases", None) - clustering_rows = [row for row in col_rows.get(cfname) + clustering_rows = [row for row in cf_col_rows if row.get('type', None) == "clustering_key"] if len(clustering_rows) > 1: clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index')) @@ -257,7 +258,7 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): clustering_size = num_column_name_components else: is_compact = True - if column_aliases or not col_rows.get(cfname): + if column_aliases or not cf_col_rows: has_value = True clustering_size = num_column_name_components else: @@ -268,7 +269,7 @@ def _build_table_metadata(self, keyspace_metadata, row, 
col_rows, trigger_rows): table_meta.comparator = comparator # partition key - partition_rows = [row for row in col_rows.get(cfname) + partition_rows = [row for row in cf_col_rows if row.get('type', None) == "partition_key"] if len(partition_rows) > 1: @@ -313,7 +314,7 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): # value alias (if present) if has_value: - value_alias_rows = [row for row in col_rows.get(cfname) + value_alias_rows = [row for row in cf_col_rows if row.get('type', None) == "compact_value"] if not key_aliases: # TODO are we checking the right thing here? @@ -336,10 +337,9 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): table_meta.columns[value_alias] = col # other normal columns - if col_rows: - for col_row in col_rows[cfname]: - column_meta = self._build_column_metadata(table_meta, col_row) - table_meta.columns[column_meta.name] = column_meta + for col_row in cf_col_rows: + column_meta = self._build_column_metadata(table_meta, col_row) + table_meta.columns[column_meta.name] = column_meta if trigger_rows: for trigger_row in trigger_rows[cfname]: From 0a33575b24596d730aac448de96d7f097760d7af Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 3 Dec 2014 21:24:39 +0000 Subject: [PATCH 1062/3726] Disambiguate row parameter in list comprehensions from function argument --- cassandra/metadata.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index d14f8c3588..f8a16b5569 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -229,8 +229,8 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): column_aliases = row.get("column_aliases", None) - clustering_rows = [row for row in cf_col_rows - if row.get('type', None) == "clustering_key"] + clustering_rows = [r for r in cf_col_rows + if r.get('type', None) == "clustering_key"] if len(clustering_rows) > 1: 
clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index')) @@ -269,8 +269,8 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): table_meta.comparator = comparator # partition key - partition_rows = [row for row in cf_col_rows - if row.get('type', None) == "partition_key"] + partition_rows = [r for r in cf_col_rows + if r.get('type', None) == "partition_key"] if len(partition_rows) > 1: partition_rows = sorted(partition_rows, key=lambda row: row.get('component_index')) @@ -280,14 +280,14 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): key_aliases = json.loads(key_aliases) if key_aliases else [] else: # In 2.0+, we can use the 'type' column. In 3.0+, we have to use it. - key_aliases = [row.get('column_name') for row in partition_rows] + key_aliases = [r.get('column_name') for r in partition_rows] key_validator = row.get("key_validator") if key_validator is not None: key_type = types.lookup_casstype(key_validator) key_types = key_type.subtypes if issubclass(key_type, types.CompositeType) else [key_type] else: - key_types = [types.lookup_casstype(row.get('validator')) for row in partition_rows] + key_types = [types.lookup_casstype(r.get('validator')) for r in partition_rows] for i, col_type in enumerate(key_types): if len(key_aliases) > i: @@ -314,8 +314,8 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): # value alias (if present) if has_value: - value_alias_rows = [row for row in cf_col_rows - if row.get('type', None) == "compact_value"] + value_alias_rows = [r for r in cf_col_rows + if r.get('type', None) == "compact_value"] if not key_aliases: # TODO are we checking the right thing here? 
value_alias = "value" From 1e4655a2452d9737e5a137a9007b4c3c7d24b7e2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 3 Dec 2014 21:57:58 +0000 Subject: [PATCH 1063/3726] Disambiguate row parameter in list comprehensions from function argument --- cassandra/metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index f8a16b5569..0e610d3b28 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -237,7 +237,7 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): if column_aliases is not None: column_aliases = json.loads(column_aliases) else: - column_aliases = [row.get('column_name') for row in clustering_rows] + column_aliases = [r.get('column_name') for r in clustering_rows] if is_composite: if issubclass(last_col, types.ColumnToCollectionType): From b0bb243db5d8f5d121c6d870e43c16509ec4055b Mon Sep 17 00:00:00 2001 From: Tyler Hobbs Date: Wed, 3 Dec 2014 16:10:05 -0600 Subject: [PATCH 1064/3726] Minor doc clarification about concurrency limits --- cassandra/concurrent.py | 6 ++++-- docs/performance.rst | 5 ++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index 93b954ffa3..e5ab4528e3 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -41,11 +41,13 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais raised. The `concurrency` parameter controls how many statements will be executed - concurrently. It is recommended that this be kept below the number of + concurrently. When :attr:`.Cluster.protocol_version` is set to 1 or 2, + it is recommended that this be kept below 100 times the number of core connections per host times the number of connected hosts (see :meth:`.Cluster.set_core_connections_per_host`). 
If that amount is exceeded, the event loop thread may attempt to block on new connection creation, - substantially impacting throughput. + substantially impacting throughput. If :attr:`~.Cluster.protocol_version` + is 3 or higher, you can safely experiment with higher levels of concurrency. Example usage:: diff --git a/docs/performance.rst b/docs/performance.rst index 8ba9aed9dd..cb378f75a5 100644 --- a/docs/performance.rst +++ b/docs/performance.rst @@ -242,7 +242,10 @@ dramatically: ~/python-driver $ python benchmarks/callback_full_pipeline.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=1 Average throughput: 679.61/sec -Until this is improved, you should limit the number of callback chains you run. +When :attr:`.Cluster.protocol_version` is set to 1 or 2, you should limit the +number of callback chains you run to rougly 100 per node in the cluster. +When :attr:`~.Cluster.protocol_version` is 3 or higher, you can safely experiment +with higher numbers of callback chains. For many use cases, you don't need to implement this pattern yourself. You can simply use :meth:`cassandra.concurrent.execute_concurrent` and From e677ee510f1c7559170e59ba327e7f50eb67ac69 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 3 Dec 2014 22:18:29 +0000 Subject: [PATCH 1065/3726] Set PreparedStatement.values in init peer review input --- cassandra/query.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/query.py b/cassandra/query.py index 22871f5d29..f9e0c08696 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -407,7 +407,7 @@ class BoundStatement(Statement): The :class:`PreparedStatement` instance that this was created from. """ - values = [] + values = None """ The sequence of values that were bound to the prepared statement. 
""" @@ -422,6 +422,7 @@ def __init__(self, prepared_statement, *args, **kwargs): self.consistency_level = prepared_statement.consistency_level self.serial_consistency_level = prepared_statement.serial_consistency_level self.fetch_size = prepared_statement.fetch_size + self.values = [] meta = prepared_statement.column_metadata if meta: From e5edc89b59c1595bdc65435e72c4dfba2ba45c63 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 3 Dec 2014 22:19:14 +0000 Subject: [PATCH 1066/3726] Stop using strange dict(dict) for routing key test. --- tests/integration/standard/test_query.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 4190a469de..373f5260b1 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -149,12 +149,16 @@ def test_multiple_routing_key_indexes(self): """ INSERT INTO test3rf.test (k, v) VALUES (?, ?) """) - prepared.routing_key_indexes = {0: {0: 0}, 1: {1: 1}} - self.assertIsInstance(prepared, PreparedStatement) + + prepared.routing_key_indexes = [0, 1] bound = prepared.bind((1, 2)) self.assertEqual(bound.routing_key, b'\x00\x04\x00\x00\x00\x01\x00\x00\x04\x00\x00\x00\x02\x00') + prepared.routing_key_indexes = [1, 0] + bound = prepared.bind((1, 2)) + self.assertEqual(bound.routing_key, b'\x00\x04\x00\x00\x00\x02\x00\x00\x04\x00\x00\x00\x01\x00') + def test_bound_keyspace(self): """ Ensure that bound.keyspace works as expected From 5b0eb474aac716b7508a6b3f45447a198d1522dc Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 3 Dec 2014 22:51:26 +0000 Subject: [PATCH 1067/3726] Reuse local variable schema_ver peer review input --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index bc64c139dc..8da292db21 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2215,7 +2215,7 @@ def 
_get_schema_mismatches(self, peers_result, local_result, local_address): peer = self._cluster.metadata.get_host(addr) if peer and peer.is_up: - versions[row.get("schema_version")].add(addr) + versions[schema_ver].add(addr) if len(versions) == 1: log.debug("[control connection] Schemas match") From 4f7dfe330716ab683f83d1bc6b04c2169b95dadb Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 3 Dec 2014 23:17:04 +0000 Subject: [PATCH 1068/3726] Bail on host with not metadata before adding/removing peer review input --- cassandra/cluster.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 7d02c3f6dc..de4b43b35b 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2061,23 +2061,21 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, addr = row.get("peer") tokens = row.get("tokens") - peer_meta_complete = bool(tokens) + if not tokens: + log.warn("Excluding host (%s) with no tokens in system.peers table of %s." % (addr, connection.host)) + continue + + found_hosts.add(addr) host = self._cluster.metadata.get_host(addr) datacenter = row.get("data_center") rack = row.get("rack") if host is None: log.debug("[control connection] Found new host to connect to: %s", addr) - host = self._cluster.add_host(addr, datacenter, rack, signal=peer_meta_complete) - should_rebuild_token_map |= peer_meta_complete + host = self._cluster.add_host(addr, datacenter, rack, signal=True) + should_rebuild_token_map = True else: - should_rebuild_token_map |= self._update_location_info(host, datacenter, rack) and peer_meta_complete - - if not peer_meta_complete: - log.warn("Excluding host (%s) with no tokens in system.peers table of %s." 
% (addr, connection.host)) - continue - - found_hosts.add(addr) + should_rebuild_token_map |= self._update_location_info(host, datacenter, rack) if partitioner and tokens: token_map[host] = tokens From f723c7c26cc123405be6b7fa4b4d44718ddf73a8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 3 Dec 2014 23:19:45 +0000 Subject: [PATCH 1069/3726] Remove extraneous file from git. --- cassandra/io/libevwrapper.sox | Bin 17896 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100755 cassandra/io/libevwrapper.sox diff --git a/cassandra/io/libevwrapper.sox b/cassandra/io/libevwrapper.sox deleted file mode 100755 index 8fd23f01b425b802592e745a9ed2b9e929f66875..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 17896 zcmeHP4{%(?dEb*IBV!w#sZC6$z%#}?olZ~I#izT= z-IFcF5Y(|1d?N&+C4>?JBB8;wK+JTSdI}|)n!%pq+^pcE9iU?e~4V`|ZAc@7`Pa=kLGq(HTPc<_ICmkryBjl?l-YO6gqW z)g?lh=GKO7Tcv1X5ts-CWB3$sBj$3oY1&q|%>cyfFLIWudyzVUM7UJ3sw@?~ZfKfW zyQ?)@5PAJw_!(98IBO906|^x{_E+p5`7=ALMB2(g&+G35`@4ai5Jp&vX;S}USvwO? z?qm(GzjYK?qyy|kV<6XgZL@?f{9W4sH@e8o>YQ8QAPq;3Mrjj$w)@<>rm^>emHOe61vUB_Mx#Yys zyc)=++0?k{>W2D8y$1xT7AYW0As9PL6sB>yebF*sihS^5A)Wv|bcGPLP?`@uf=uZS z&SW=;yaIW;bog>1;G&q&{xv{!8BjY9{`Q+cdi=gGUpW-Ix##Ij$@f=5=0_H#X9_Xz zOu5h10+Oz}1Zf49iR!LwraF;|MHAJDcq?UVch$NoEXsH*6eijEe2*b~KDJX9%{^J$ z>a5yOw3=)u#3FQLqTS9|%*>ydTVr1~iRHY8=U&p-L>jr}mFLK+*A}DdCtUSKRdpt0u=>f_ zs5-<|OQ~w$@~iQvWeluu`7DtoYoJl{T=?)icrJHtgQk%?V)VUKvH#ZyAk;2|_TRt| z@_%{2<(^~0<&!WtW)@%rycjaP{btbdR-9%qd&K;!Qzi21T3 zLeTj2pSS+M{)tP9cug$%po%b@ZGLPh}So z$+I21+juoHX1@Up%<>K^o7FBNO|FfnYEpzcH}KZ{&_Tcfkt2 z&Lv-Y0d3ECy!tmP{V}|vm{K3>TgERqA$|y)j^E4!EBhb5uG!f4I=cEn(?D4-#%T;( zJ8BGUbU2*PIxYC|(l;D_T88lk#Bh|FZbt7UpQ}UTH#g%Z+XD?X@7CwW5nKQE@QqkH{bkQDq|j0PPhtT%yFb?!uUeeON# z!q_9=F~{h{<1GoU#FSh3Hk?>{=Dh*_56k=%x|bTzs7_LqgXNN!g0-S z0P}ma;W(}Hrhz2^L<7_8V49EP$~nw7oG#SU7If(}SKY$9$uM{IYFwCyd%@3dIG(G! 
zjF-g876kXF_#v+rLImr}`4zTEy%1K@G_G_ngmYlUKzcUC=SXF&5^;9|I;P=%Ed)3DNord!tK=u6y>-wW; zvSzp`_p7Gd`?(jLy{I_(na2KG4AlLQ>!_j?6;rQ7Qg+TDhqMxJ2;UescZ{6}JVpO7 z3QR_EJzhv&`^z4oyTvr?ae_X*pn!hQwED4I{5!@vXd{;E$%x&uc*oeb0_OF)*Y7+9 z*-Z9tY}vF;)Vw5@oPiDdBg}f=a2M^luh5<@ulbcUb04DSN!oaFC-koCouBKPv2)bC zoBwu@+Lw1*yY9!}g(D?>!&r)byR0$y^2sltg*}B9et=8SxfhCXVC3g_E#%(gJ_Jcq zZk*S|g!>QeFS!q#y)bK(+l#;EUFSuhrrdju35q5zq9*b}FR2OkM*l5-=Z8wKRO#(F zGMKLv;PaIwvLt!Fa;_}tDJEZerYsl8@+?{UWqGzN7t8V-SuU03XJxremYr1*Kn-=+96;twd^NBm8S|1Gx9v9#jlj7eXKBV|D;@2tuRpM7G{uSbvftN1@_hh1J`c%+xow=nUTu=1+Esa|nh-uie zW%Cx%luD&V<7N@bSm|iS65(u5GA3ftY&M!~%S5ZgcYz;ft{8eXpe($v$C;FJZ+~kf%Z(QGmuTic3HN_+R=I`WYDOXZ;;o&&Lx^VM=3+PE=*Z^QIC9aMZX%vFqcJo|QO3p16h0Zi&GMEZXj}4E3q_rnt?N;%X3!%!ymz%h4nyc3Y z?C35l8%U-C(ag@SPRv8r6)M-nbR{!ZySSFhjFpUG{xPAhdfGtgGY#(EVl^@)qPSg9 z^7-)p3Q^*tZ)qV2DC_TH#FO0uu@G`;Q5t{^-91s^i_8%vz1Vz5ONB33CQ9&RJ;#p= zVDJp#^OuW~VO(sB=9f|zWRg$o;Cx@-B7EI`pMSv_<%6hQ1sV0R{4Kgg8&Rrr783h5Z}Xkw@am|_3om&x0OFUva6*1F{J)Ih3v3D z!ZJ;v_e)fa&p>Yh#(0YLHnF>@#<3210q!6Dyp&0|d)2 z)Z0(7#r{O;bmMpwdgU|FI}SZR>z!^KA40E|^=haOlsx00-)*a~wj#JkP@*11^*givQ=cmv+JOjPAp*O^Om26)a zw`*N5!Y=CmXLIUdJNPg8d-yoh51E$BAL>FZW*T665z|_x^-SrV2&EgCb}+S>_AFZ2SF)ihQ_?a$ay1Ec3)h~I64)D!BfE!&$s?Gg?rB_y;`os)0WfC{~Wdtc`PHm&OG8z9P0V6 zoZ+~<9SW`VN9Fdbrg zkm(_&hnWsDJ;ro|=?SKzOdY1;^ZEGhHa7xFdLyiNnDv5ajO5)+J3z^8nBrGE5+l0N>E za)Q!h#S4b!ga&>7U@$V` zYe6|&GUN*e{r+Git=>?#VUBgW1dJOu0JqQaiTQu@i)PY6A4L_4G{5M31bCGTKMA}a zStrt8g*-$I5)nFc0*pQ+xe> z;ykw3SldTD?Yl9xuYc!X#P%9%`w@@58d;>hK zAC1AP>xYby2$TH~0VKj?uiu;WgC~29sh-xaJ6>e(#$>PGt9%PQ*=r1TM)vx>%l&Mx zv9|Za4<*9Xz8h2f`n?Q&L!tIH20NqcZwl;U}S>h*rVn6ci!S1{K5@mj`uf4zdS-Y=UN>;3OW#(F>NWUTk6y^QsKb1P%L ze+)9#`@we^xA1y@n6bWI4{3d_f10tro_@wyUmq_q*6aN>#(F)EYkOXgA8LL5U8)rE zbFH7njP?3i$yl$SKVYoa&nB(EQq7O4^%?hQeO@npjP?4tm$6sNU27d^P%gSUHd%7gnnn4d=r{rO8z{WBi?ya&^B zrF%RddhinU{8{k7#)IoUc)JHDJ@_UMzQcnbP#9O8N(U827ghRe4?gO_$31w|gWvSv zk35*pR-my$dn-J6jRy~+Tl5Y6kCFcbna(?e$n_Sx&#UEd0stad`s^h6@G2xt5Y=$mcyW)wqYWyW< 
zx~el8OJ%#)):t?jP8#JptP+Ka0$o!H2PHc~D-v94_s+gb+LN|)n5coOU3(JoEF!4~IJGU;Dr<(nvnQ3oNfcCWNa5cixDsbTn^HK-2tgemz%=VRtk^D+;BcbZY}w7~U!-8c^_jSB z;dEg%o|VTnNxVCnFdK2nZ0*QcQRQCBwpdx5G_{midL+%}=x(ow4arA5nuvehYLnXC zF|(mNW~J?TDyao@iO5rvB)F=vso9R&mf4WZcHywBTeQVWSkbJMZB+X}xUH=*Y1gHa z*ea5AVfPd(Hp`>4BvTwNq$_b?w!Uy))>TE3x)K+i?iO{)yvj;inRsjy?MIOwDcXWy z^bnH4QEjcmT>$?q0%V&~NeiLvilHa?cQxoBs4%VFym70&xd`O}U8=@j$~&C~w1a}j m4nX@P{{4^?8r1@MH2|b}8!y@xCF~&6g&;#!ZJxMO3-R9oZ9(q< From 1e1d97efbd321127ee62164ce739a15275e4c6a8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 4 Dec 2014 11:33:19 -0600 Subject: [PATCH 1070/3726] Fix response future callback tests Assertions on callback were being swallowed because result and final exception are both set by the time result() was called. --- tests/unit/test_response_future.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 32c635bfb1..18ed4f02d3 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -327,12 +327,18 @@ def test_callback(self): rf = self.make_response_future(session) rf.send_request() - rf.add_callback(self.assertEqual, [{'col': 'val'}]) + callback = Mock() + expected_result = [{'col': 'val'}] + arg = "positional" + kwargs = {'one': 1, 'two': 2} + rf.add_callback(callback, arg, **kwargs) - rf._set_result(self.make_mock_response([{'col': 'val'}])) + rf._set_result(self.make_mock_response(expected_result)) result = rf.result() - self.assertEqual(result, [{'col': 'val'}]) + self.assertEqual(result, expected_result) + + callback.assert_called_once_with(expected_result, arg, **kwargs) # this should get called immediately now that the result is set rf.add_callback(self.assertEqual, [{'col': 'val'}]) @@ -383,12 +389,18 @@ def test_add_callbacks(self): rf = ResponseFuture(session, message, query) rf.send_request() + callback = Mock() + expected_result = [{'col': 'val'}] + arg = 
"positional" + kwargs = {'one': 1, 'two': 2} rf.add_callbacks( - callback=self.assertEqual, callback_args=([{'col': 'val'}],), + callback=callback, callback_args=(arg,), callback_kwargs=kwargs, errback=self.assertIsInstance, errback_args=(Exception,)) - rf._set_result(self.make_mock_response([{'col': 'val'}])) - self.assertEqual(rf.result(), [{'col': 'val'}]) + rf._set_result(self.make_mock_response(expected_result)) + self.assertEqual(rf.result(), expected_result) + + callback.assert_called_once_with(expected_result, arg, **kwargs) def test_prepared_query_not_found(self): session = self.make_session() From b0bcaccbb3f7652fc3d8a74a452b0609693fce82 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 4 Dec 2014 11:58:33 -0600 Subject: [PATCH 1071/3726] Tests for multiple callbacks, errbacks PYTHON-182 github #214 --- tests/unit/test_response_future.py | 59 +++++++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 1 deletion(-) diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 18ed4f02d3..bdb2ccb534 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -19,7 +19,7 @@ from mock import Mock, MagicMock, ANY -from cassandra import ConsistencyLevel +from cassandra import ConsistencyLevel, Unavailable from cassandra.cluster import Session, ResponseFuture, NoHostAvailable from cassandra.connection import Connection, ConnectionException from cassandra.protocol import (ReadTimeoutErrorMessage, WriteTimeoutErrorMessage, @@ -366,6 +366,63 @@ def test_errback(self): # this should get called immediately now that the error is set rf.add_errback(self.assertIsInstance, Exception) + def test_multiple_callbacks(self): + session = self.make_session() + rf = self.make_response_future(session) + rf.send_request() + + callback = Mock() + expected_result = [{'col': 'val'}] + arg = "positional" + kwargs = {'one': 1, 'two': 2} + rf.add_callback(callback, arg, **kwargs) + + callback2 = Mock() + arg2 = 
"another" + kwargs2 = {'three': 3, 'four': 4} + rf.add_callback(callback2, arg2, **kwargs2) + + rf._set_result(self.make_mock_response(expected_result)) + + result = rf.result() + self.assertEqual(result, expected_result) + + callback.assert_called_once_with(expected_result, arg, **kwargs) + callback2.assert_called_once_with(expected_result, arg2, **kwargs2) + + def test_multiple_errbacks(self): + session = self.make_session() + pool = session._pools.get.return_value + connection = Mock(spec=Connection) + pool.borrow_connection.return_value = (connection, 1) + + query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)") + query.retry_policy = Mock() + query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None) + message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) + + rf = ResponseFuture(session, message, query) + rf.send_request() + + callback = Mock() + arg = "positional" + kwargs = {'one': 1, 'two': 2} + rf.add_errback(callback, arg, **kwargs) + + callback2 = Mock() + arg2 = "another" + kwargs2 = {'three': 3, 'four': 4} + rf.add_errback(callback2, arg2, **kwargs2) + + expected_exception = Unavailable("message", 1, 2, 3) + result = Mock(spec=UnavailableErrorMessage, info={'something': 'here'}) + result.to_exception.return_value = expected_exception + rf._set_result(result) + self.assertRaises(Exception, rf.result) + + callback.assert_called_once_with(expected_exception, arg, **kwargs) + callback2.assert_called_once_with(expected_exception, arg2, **kwargs2) + def test_add_callbacks(self): session = self.make_session() query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)") From 00ae05c63ce6f39b39c25d45e29a5af20cf7b170 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 5 Dec 2014 10:03:19 -0600 Subject: [PATCH 1072/3726] Timestamp deserialization workaround for Windows PYTHON-119 Fixes Windows-specific problem forming datetime from timestamps < -43200 (1969-12-31 12:00:00) --- cassandra/cqltypes.py | 10 
++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 9fae266818..86e33e8eea 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -36,7 +36,7 @@ import re import socket import time -from datetime import datetime +from datetime import datetime, timedelta from uuid import UUID import warnings @@ -565,7 +565,13 @@ def my_timestamp(self): @staticmethod def deserialize(byts, protocol_version): - return datetime.utcfromtimestamp(int64_unpack(byts) / 1000.0) + timestamp = int64_unpack(byts) / 1000.0 + if timestamp >= 0: + dt = datetime.utcfromtimestamp(timestamp) + else: + # PYTHON-119: workaround for Windows + dt = datetime(1970, 1, 1) + timedelta(seconds=timestamp) + return dt @staticmethod def serialize(v, protocol_version): From 4f613a9a34883d92021c0841c26583120e56a835 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 10 Dec 2014 15:11:18 -0600 Subject: [PATCH 1073/3726] DateType timestamp deserialization test for PYTHON-119 --- cassandra/cqltypes.py | 2 +- tests/unit/test_types.py | 50 +++++++++++++++++++++++++++------------- 2 files changed, 35 insertions(+), 17 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 86e33e8eea..100c2968c9 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -575,7 +575,6 @@ def deserialize(byts, protocol_version): @staticmethod def serialize(v, protocol_version): - global _have_warned_about_timestamps try: converted = calendar.timegm(v.utctimetuple()) converted = converted * 1e3 + getattr(v, 'microsecond', 0) / 1e3 @@ -584,6 +583,7 @@ def serialize(v, protocol_version): if type(v) not in _number_types: raise TypeError('DateType arguments must be a datetime or timestamp') + global _have_warned_about_timestamps if not _have_warned_about_timestamps: _have_warned_about_timestamps = True warnings.warn( diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index f389a54a74..dc68ccebb0 100644 --- 
a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -20,11 +20,12 @@ from binascii import unhexlify import datetime +import time import cassandra from cassandra.cqltypes import (BooleanType, lookup_casstype_simple, lookup_casstype, LongType, DecimalType, SetType, cql_typename, CassandraType, UTF8Type, parse_casstype_args, - EmptyValue, _CassandraType, DateType) + EmptyValue, _CassandraType, DateType, int64_pack) from cassandra.query import named_tuple_factory from cassandra.protocol import (write_string, read_longstring, write_stringmap, read_stringmap, read_inet, write_inet, @@ -99,11 +100,7 @@ def test_lookup_casstype(self): # like "MapType(AsciiType, IntegerType)" and "ReversedType(AsciiType)" self.assertEqual(str(lookup_casstype(BooleanType(True))), str(BooleanType(True))) - def test_cassandratype(self): - """ - Smoke test cass_parameterized_type_with - """ - + def test_casstype_parameterized(self): self.assertEqual(LongType.cass_parameterized_type_with(()), 'LongType') self.assertEqual(LongType.cass_parameterized_type_with((), full=True), 'org.apache.cassandra.db.marshal.LongType') self.assertEqual(SetType.cass_parameterized_type_with([DecimalType], full=True), 'org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.DecimalType)') @@ -115,11 +112,7 @@ def test_cassandratype(self): 'map', cassandra.cqltypes.MapType.apply_parameters(subtypes).cql_parameterized_type()) - def test_datetype(self): - """ - Test cassandra.cqltypes.DateType() construction - """ - + def test_datetype_from_string(self): # Ensure all formats can be parsed, without exception for format in cassandra.cqltypes.cql_time_formats: date_string = str(datetime.datetime.now().strftime(format)) @@ -180,16 +173,41 @@ class BarType(FooType): def test_empty_value(self): self.assertEqual(str(EmptyValue()), 'EMPTY') - def test_CassandraType(self): + def test_cassandratype_base(self): cassandra_type = _CassandraType('randomvaluetocheck') self.assertEqual(cassandra_type.val, 
'randomvaluetocheck') self.assertEqual(cassandra_type.validate('randomvaluetocheck2'), 'randomvaluetocheck2') self.assertEqual(cassandra_type.val, 'randomvaluetocheck') - def test_DateType(self): - now = datetime.datetime.now() - date_type = DateType(now) - self.assertEqual(date_type.my_timestamp(), now) + def test_datetype(self): + now_timestamp = time.time() + now_datetime = datetime.datetime.utcfromtimestamp(now_timestamp) + + # same results serialized + # (this could change if we follow up on the timestamp multiplication warning in DateType.serialize) + self.assertEqual(DateType.serialize(now_datetime, 0), DateType.serialize(now_timestamp, 0)) + + # from timestamp + date_type = DateType(now_timestamp) + self.assertEqual(date_type.my_timestamp(), now_timestamp) + + # from datetime object + date_type = DateType(now_datetime) + self.assertEqual(date_type.my_timestamp(), now_datetime) + + # deserialize + # epoc + expected = 0 + self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime.utcfromtimestamp(expected)) + + # beyond 32b + expected = long(2**33) + self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime.utcfromtimestamp(expected)) + + # less than epoc (PYTHON-119) + expected = -770172256 + self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime(1945, 8, 5, 23, 15, 44)) + self.assertRaises(ValueError, date_type.interpret_datestring, 'fakestring') def test_write_read_string(self): From 7e09f19e08319314897bfb97f8e5ea9cba9e1221 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 10 Dec 2014 16:12:32 -0600 Subject: [PATCH 1074/3726] Don't use 'long' in unit tests. 
breaks python 3 --- tests/unit/test_types.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index dc68ccebb0..5758c508aa 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -201,7 +201,7 @@ def test_datetype(self): self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime.utcfromtimestamp(expected)) # beyond 32b - expected = long(2**33) + expected = 2**33 self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime.utcfromtimestamp(expected)) # less than epoc (PYTHON-119) From 398ae7e697227e48bad829e7468a085128beb658 Mon Sep 17 00:00:00 2001 From: Irina Kaplounova Date: Thu, 11 Dec 2014 00:37:40 -0800 Subject: [PATCH 1075/3726] Test for PYTHON-173 --- tests/integration/standard/test_metadata.py | 34 +++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 0899e859a9..9affb429a5 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -565,3 +565,37 @@ def test_getting_replicas(self): replicas = token_map.get_replicas("ks", MD5Token(str(token.value + 1))) expected_host = hosts[(i + 1) % len(hosts)] self.assertEqual(set(replicas), set([expected_host])) + + +class TableAlterMetadata(unittest.TestCase): + """ + Test verifies that table metadata is preserved on keyspace alter + """ + def setUp(self): + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + name = self._testMethodName.lower() + crt_ks = ''' + CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} AND durable_writes = true''' % name + self.session.execute(crt_ks) + + def tearDown(self): + name = self._testMethodName.lower() + self.session.execute('DROP KEYSPACE %s' % name) + self.cluster.shutdown() + + def 
test_keyspace_alter(self): + """ + Table info is preserved upon keyspace alter: + Create table + Verify schema + Alter ks + Verify that table metadata is still present + """ + name = self._testMethodName.lower() + + self.session.execute('CREATE TABLE %s.d (d INT PRIMARY KEY)' % name) + self.assertTrue(self.cluster.metadata.keyspaces[name].tables) + + self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' %name) + self.assertTrue(self.cluster.metadata.keyspaces[name].tables) From 6cc069b10ae7700b7528a3ab721e060f5202103d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 11 Dec 2014 11:44:41 -0600 Subject: [PATCH 1076/3726] Skip alias test when < 2.0, only create table once --- .../standard/test_row_factories.py | 34 ++++++++++--------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/tests/integration/standard/test_row_factories.py b/tests/integration/standard/test_row_factories.py index 189a0a9d15..c697003a1f 100644 --- a/tests/integration/standard/test_row_factories.py +++ b/tests/integration/standard/test_row_factories.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from tests.integration import PROTOCOL_VERSION +from tests.integration import get_server_versions, PROTOCOL_VERSION try: import unittest2 as unittest @@ -143,29 +143,28 @@ class NamedTupleFactoryAndNumericColNamesTests(unittest.TestCase): """ Test for PYTHON-122: Improve Error Handling/Reporting for named_tuple_factory and Numeric Column Names """ - def setUp(self): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - self.session = self.cluster.connect() + @classmethod + def setup_class(cls): + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.session = cls.cluster.connect() + cls._cass_version, cls._cql_version = get_server_versions() ddl = ''' - CREATE TABLE test3rf.ToBeIndexed ( key blob PRIMARY KEY, "626972746864617465" blob ) + CREATE TABLE test1rf.table_num_col ( key blob PRIMARY KEY, "626972746864617465" blob ) WITH COMPACT STORAGE''' + cls.session.execute(ddl) - self.session.execute(ddl) - - def tearDown(self): - """ - Shutdown cluster - """ - self.session.execute("DROP TABLE test3rf.ToBeIndexed") - self.cluster.shutdown() + @classmethod + def teardown_class(cls): + cls.session.execute("DROP TABLE test1rf.table_num_col") + cls.cluster.shutdown() def test_no_exception_on_select(self): """ no exception on SELECT for numeric column name """ try: - self.session.execute('SELECT * FROM test3rf.ToBeIndexed') + self.session.execute('SELECT * FROM test1rf.table_num_col') except ValueError as e: self.fail("Unexpected ValueError exception: %s" % e.message) @@ -173,8 +172,11 @@ def test_can_select_using_alias(self): """ can SELECT "" AS aliases """ + if self._cass_version < (2, 0, 0): + raise unittest.SkipTest("Alias in SELECT not supported before 2.0") + try: - self.session.execute('SELECT key, "626972746864617465" AS my_col from test3rf.ToBeIndexed') + self.session.execute('SELECT key, "626972746864617465" AS my_col from test1rf.table_num_col') except ValueError as e: self.fail("Unexpected ValueError exception: %s" % e.message) @@ -184,6 
+186,6 @@ def test_can_select_with_dict_factory(self): """ self.session.row_factory = dict_factory try: - self.session.execute('SELECT * FROM test3rf.ToBeIndexed') + self.session.execute('SELECT * FROM test1rf.table_num_col') except ValueError as e: self.fail("Unexpected ValueError exception: %s" % e.message) From d12896c540b0e39170bef1ace2f0f3c6c86f7ea0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 11 Dec 2014 12:17:40 -0600 Subject: [PATCH 1077/3726] Skip paging test for protocol v1 also fix wording in some skip messages --- tests/integration/standard/test_concurrent.py | 5 +++++ tests/integration/standard/test_query.py | 2 +- tests/integration/standard/test_query_paging.py | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index d8b7aac97c..78e005a8b5 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -84,6 +84,11 @@ def test_execute_concurrent_with_args(self): self.assertEqual([(True, [(i,)]) for i in range(num_statements)], results) def test_execute_concurrent_paged_result(self): + if PROTOCOL_VERSION < 2: + raise unittest.SkipTest( + "Protocol 2+ is required for Paging, currently testing against %r" + % (PROTOCOL_VERSION,)) + num_statements = 201 statement = SimpleStatement( "INSERT INTO test3rf.test (k, v) VALUES (%s, %s)", diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 373f5260b1..e8728cd332 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -310,7 +310,7 @@ class SerialConsistencyTests(unittest.TestCase): def setUp(self): if PROTOCOL_VERSION < 2: raise unittest.SkipTest( - "Protocol 2.0+ is required for BATCH operations, currently testing against %r" + "Protocol 2.0+ is required for Serial Consistency, currently testing against %r" % (PROTOCOL_VERSION,)) self.cluster 
= Cluster(protocol_version=PROTOCOL_VERSION) diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index 448daf0c02..9bc0d9237f 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -37,7 +37,7 @@ class QueryPagingTests(unittest.TestCase): def setUp(self): if PROTOCOL_VERSION < 2: raise unittest.SkipTest( - "Protocol 2.0+ is required for BATCH operations, currently testing against %r" + "Protocol 2.0+ is required for Paging state, currently testing against %r" % (PROTOCOL_VERSION,)) self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) From c8bb01825f6cc478d694ea4a5223b843f4405b5d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 12 Dec 2014 13:52:05 -0600 Subject: [PATCH 1078/3726] Correct consistency assignment in Unavailable.__init__ --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index be8bad8494..9a710c601e 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -143,7 +143,7 @@ class Unavailable(Exception): """ The number of replicas that were actually alive """ def __init__(self, summary_message, consistency=None, required_replicas=None, alive_replicas=None): - self.consistency_level = consistency + self.consistency = consistency self.required_replicas = required_replicas self.alive_replicas = alive_replicas Exception.__init__(self, summary_message + ' info=' + From 4723a86f098196738722990954eaa97d34d6a7b4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 12 Dec 2014 16:05:38 -0600 Subject: [PATCH 1079/3726] Update integration tests to CCM 2.0 API Also changed how modules interact with the package ccm cluster, making it more efficient, and hopefully robust. 
--- test-requirements.txt | 2 +- tests/integration/__init__.py | 137 +++--- tests/integration/long/test_consistency.py | 24 +- tests/integration/long/test_ipv6.py | 43 +- tests/integration/long/test_large_data.py | 6 +- .../long/test_loadbalancingpolicies.py | 28 +- tests/integration/long/test_multi_inserts.py | 13 +- tests/integration/long/test_schema.py | 29 +- .../standard/test_authentication.py | 451 ++---------------- tests/integration/standard/test_cluster.py | 8 +- tests/integration/standard/test_concurrent.py | 8 +- tests/integration/standard/test_connection.py | 8 +- tests/integration/standard/test_metadata.py | 18 +- tests/integration/standard/test_metrics.py | 8 +- .../standard/test_prepared_statements.py | 8 +- tests/integration/standard/test_query.py | 8 +- .../integration/standard/test_query_paging.py | 8 +- .../standard/test_row_factories.py | 6 +- tests/integration/standard/test_types.py | 6 +- tests/integration/standard/test_udts.py | 14 +- 20 files changed, 250 insertions(+), 583 deletions(-) diff --git a/test-requirements.txt b/test-requirements.txt index 7f1171da11..430515c0d3 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -2,7 +2,7 @@ blist scales nose mock -ccm +ccm>=2.0 unittest2 PyYAML pytz diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index b55bf17cc7..094ccb3c8e 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -18,7 +18,7 @@ try: import unittest2 as unittest except ImportError: - import unittest # noqa + import unittest # noqa import logging log = logging.getLogger(__name__) @@ -26,10 +26,13 @@ import os from threading import Event +from itertools import groupby + from cassandra.cluster import Cluster try: from ccmlib.cluster import Cluster as CCMCluster + from ccmlib.cluster_factory import ClusterFactory as CCMClusterFactory from ccmlib import common except ImportError as e: raise unittest.SkipTest('ccm is a dependency for integration tests:', e) @@ -40,9 
+43,16 @@ CCM_CLUSTER = None CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) -CASSANDRA_HOME = os.getenv('CASSANDRA_HOME', None) CASSANDRA_VERSION = os.getenv('CASSANDRA_VERSION', '2.0.9') +CCM_KWARGS = {} +if CASSANDRA_DIR: + log.info("Using Cassandra dir: %s", CASSANDRA_DIR) + CCM_KWARGS['install_dir'] = CASSANDRA_DIR +else: + log.info('Using Cassandra version: %s', CASSANDRA_VERSION) + CCM_KWARGS['version'] = CASSANDRA_VERSION + if CASSANDRA_VERSION.startswith('1'): default_protocol_version = 1 else: @@ -95,78 +105,75 @@ def get_node(node_id): return CCM_CLUSTER.nodes['node%s' % node_id] -def setup_package(): - if CASSANDRA_DIR: - log.info("Using Cassandra dir: %s", CASSANDRA_DIR) - elif CASSANDRA_HOME: - log.info("Using Cassandra home: %s", CASSANDRA_HOME) - else: - log.info('Using Cassandra version: %s', CASSANDRA_VERSION) - try: - try: - cluster = CCMCluster.load(path, CLUSTER_NAME) - log.debug("Found existing ccm test cluster, clearing") - cluster.clear() - if CASSANDRA_DIR: - cluster.set_cassandra_dir(cassandra_dir=CASSANDRA_DIR) - else: - cluster.set_cassandra_dir(cassandra_version=CASSANDRA_VERSION) - except Exception: - if CASSANDRA_DIR: - log.debug("Creating new ccm test cluster with cassandra dir %s", CASSANDRA_DIR) - cluster = CCMCluster(path, CLUSTER_NAME, cassandra_dir=CASSANDRA_DIR) - else: - log.debug("Creating new ccm test cluster with version %s", CASSANDRA_VERSION) - cluster = CCMCluster(path, CLUSTER_NAME, cassandra_version=CASSANDRA_VERSION) - cluster.set_configuration_options({'start_native_transport': True}) - common.switch_cluster(path, CLUSTER_NAME) - cluster.populate(3) +def use_multidc(dc_list): + use_cluster(MULTIDC_CLUSTER_NAME, dc_list, start=True) - log.debug("Starting ccm test cluster") - cluster.start(wait_for_binary_proto=True, wait_other_notice=True) - except Exception: - log.exception("Failed to start ccm cluster:") - raise +def use_singledc(start=True): + use_cluster(CLUSTER_NAME, [3], start=start) + + +def remove_cluster(): 
+ global CCM_CLUSTER + if CCM_CLUSTER: + log.debug("removing cluster %s", CCM_CLUSTER.name) + CCM_CLUSTER.remove() + CCM_CLUSTER = None + + +def is_current_cluster(cluster_name, node_counts): global CCM_CLUSTER - CCM_CLUSTER = cluster - setup_test_keyspace() + if CCM_CLUSTER and CCM_CLUSTER.name == cluster_name: + if [len(list(nodes)) for dc, nodes in + groupby(CCM_CLUSTER.nodelist(), lambda n: n.data_center)] == node_counts: + return True + return False -def clear_and_use_multidc(dc_list): - teardown_package() +def use_cluster(cluster_name, nodes, ipformat=None, start=True): + if is_current_cluster(cluster_name, nodes): + return + + global CCM_CLUSTER + if CCM_CLUSTER: + CCM_CLUSTER.stop() + try: try: - cluster = CCMCluster.load(path, MULTIDC_CLUSTER_NAME) - log.debug("Found existing ccm test multi-dc cluster, clearing") + cluster = CCMClusterFactory.load(path, cluster_name) + log.debug("Found existing ccm %s cluster; clearing", cluster_name) cluster.clear() + cluster.set_install_dir(**CCM_KWARGS) except Exception: - log.debug("Creating new ccm test multi-dc cluster") - if CASSANDRA_DIR: - cluster = CCMCluster(path, MULTIDC_CLUSTER_NAME, cassandra_dir=CASSANDRA_DIR) - else: - cluster = CCMCluster(path, MULTIDC_CLUSTER_NAME, cassandra_version=CASSANDRA_VERSION) + log.debug("Creating new ccm %s cluster with %s", cluster_name, CCM_KWARGS) + cluster = CCMCluster(path, cluster_name, **CCM_KWARGS) cluster.set_configuration_options({'start_native_transport': True}) - common.switch_cluster(path, MULTIDC_CLUSTER_NAME) - cluster.populate(dc_list) + common.switch_cluster(path, cluster_name) + cluster.populate(nodes, ipformat=ipformat) - log.debug("Starting ccm test cluster") - cluster.start(wait_for_binary_proto=True, wait_other_notice=True) + if start: + log.debug("Starting ccm %s cluster", cluster_name) + cluster.start(wait_for_binary_proto=True, wait_other_notice=True) + setup_test_keyspace() + + CCM_CLUSTER = cluster except Exception: log.exception("Failed to start ccm 
cluster:") raise - global CCM_CLUSTER - CCM_CLUSTER = cluster - setup_test_keyspace() - log.debug("Switched to multidc cluster") - -def clear_and_use_singledc(): - teardown_package() +def teardown_package(): + for cluster_name in [CLUSTER_NAME, MULTIDC_CLUSTER_NAME]: + try: + cluster = CCMClusterFactory.load(path, cluster_name) + try: + cluster.remove() + log.info('Removed cluster: %s' % cluster_name) + except Exception: + log.exception('Failed to remove cluster: %s' % cluster_name) - setup_package() - log.debug("Switched to singledc cluster") + except Exception: + log.warn('Did not find cluster: %s' % cluster_name) def setup_test_keyspace(): @@ -211,22 +218,6 @@ def setup_test_keyspace(): cluster.shutdown() -def teardown_package(): - for cluster_name in [CLUSTER_NAME, MULTIDC_CLUSTER_NAME]: - try: - cluster = CCMCluster.load(path, cluster_name) - - try: - cluster.clear() - cluster.remove() - log.info('Cleared cluster: %s' % cluster_name) - except Exception: - log.exception('Failed to clear cluster: %s' % cluster_name) - - except Exception: - log.warn('Did not find cluster: %s' % cluster_name) - - class UpDownWaiter(object): def __init__(self, host): diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index 51e68a94d5..7cb7d14a4c 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -21,7 +21,7 @@ from cassandra.policies import TokenAwarePolicy, RoundRobinPolicy, \ DowngradingConsistencyRetryPolicy from cassandra.query import SimpleStatement -from tests.integration import PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION from tests.integration.long.utils import force_stop, create_schema, \ wait_for_down, wait_for_up, start, CoordinatorStats @@ -29,7 +29,7 @@ try: import unittest2 as unittest except ImportError: - import unittest # noqa + import unittest # noqa ALL_CONSISTENCY_LEVELS = set([ ConsistencyLevel.ANY, ConsistencyLevel.ONE, 
ConsistencyLevel.TWO, @@ -43,6 +43,10 @@ SINGLE_DC_CONSISTENCY_LEVELS = ALL_CONSISTENCY_LEVELS - MULTI_DC_CONSISTENCY_LEVELS +def setup_module(): + use_singledc() + + class ConsistencyTests(unittest.TestCase): def setUp(self): @@ -132,11 +136,11 @@ def _test_tokenaware_one_node_down(self, keyspace, rf, accepted): self._assert_writes_succeed(session, keyspace, accepted) self._assert_reads_succeed(session, keyspace, - accepted - set([ConsistencyLevel.ANY])) + accepted - set([ConsistencyLevel.ANY])) self._assert_writes_fail(session, keyspace, - SINGLE_DC_CONSISTENCY_LEVELS - accepted) + SINGLE_DC_CONSISTENCY_LEVELS - accepted) self._assert_reads_fail(session, keyspace, - SINGLE_DC_CONSISTENCY_LEVELS - accepted) + SINGLE_DC_CONSISTENCY_LEVELS - accepted) finally: start(2) wait_for_up(cluster, 2) @@ -180,8 +184,8 @@ def test_rfthree_tokenaware_none_down(self): self._assert_writes_succeed(session, keyspace, SINGLE_DC_CONSISTENCY_LEVELS) self._assert_reads_succeed(session, keyspace, - SINGLE_DC_CONSISTENCY_LEVELS - set([ConsistencyLevel.ANY]), - expected_reader=2) + SINGLE_DC_CONSISTENCY_LEVELS - set([ConsistencyLevel.ANY]), + expected_reader=2) def _test_downgrading_cl(self, keyspace, rf, accepted): cluster = Cluster( @@ -203,11 +207,11 @@ def _test_downgrading_cl(self, keyspace, rf, accepted): self._assert_writes_succeed(session, keyspace, accepted) self._assert_reads_succeed(session, keyspace, - accepted - set([ConsistencyLevel.ANY])) + accepted - set([ConsistencyLevel.ANY])) self._assert_writes_fail(session, keyspace, - SINGLE_DC_CONSISTENCY_LEVELS - accepted) + SINGLE_DC_CONSISTENCY_LEVELS - accepted) self._assert_reads_fail(session, keyspace, - SINGLE_DC_CONSISTENCY_LEVELS - accepted) + SINGLE_DC_CONSISTENCY_LEVELS - accepted) finally: start(2) wait_for_up(cluster, 2) diff --git a/tests/integration/long/test_ipv6.py b/tests/integration/long/test_ipv6.py index 167394acd5..9d6dab1f83 100644 --- a/tests/integration/long/test_ipv6.py +++ 
b/tests/integration/long/test_ipv6.py @@ -18,8 +18,7 @@ from cassandra.cluster import Cluster, NoHostAvailable from ccmlib import common -from ccmlib.cluster import Cluster as CCMCluster -from tests.integration import setup_package, teardown_package, PROTOCOL_VERSION, CASSANDRA_DIR, CASSANDRA_VERSION, path +from tests.integration import use_cluster, remove_cluster, PROTOCOL_VERSION from cassandra.io.asyncorereactor import AsyncoreConnection try: @@ -40,39 +39,17 @@ IPV6_CLUSTER_NAME = 'ipv6_test_cluster' -def setup_module(cls): +def setup_module(module): validate_ccm_viable() validate_host_viable() - cls.ccm_cluster = object() - teardown_package() - try: - try: - ccm_cluster = CCMCluster.load(path, IPV6_CLUSTER_NAME) - log.debug("Found existing ccm test ipv6 cluster, clearing") - ccm_cluster.clear() - except Exception: - log.debug("Creating new ccm test ipv6 cluster") - if CASSANDRA_DIR: - ccm_cluster = CCMCluster(path, IPV6_CLUSTER_NAME, cassandra_dir=CASSANDRA_DIR) - else: - ccm_cluster = CCMCluster(path, IPV6_CLUSTER_NAME, cassandra_version=CASSANDRA_VERSION) - ccm_cluster.set_configuration_options({'start_native_transport': True}) - common.switch_cluster(path, IPV6_CLUSTER_NAME) - ccm_cluster.populate(1, ipformat='::%d') - - log.debug("Starting ccm test cluster") - ccm_cluster.start(wait_for_binary_proto=True) - except Exception: - log.exception("Failed to start ccm cluster:") - raise - - log.debug("Switched to ipv6 cluster") - cls.ccm_cluster = ccm_cluster - - -def teardown_module(cls): - cls.ccm_cluster.stop() - setup_package() + # We use a dedicated cluster (instead of common singledc, as in other tests) because + # it's most likely that the test host will only have one local ipv6 address (::1) + # singledc has three + use_cluster(IPV6_CLUSTER_NAME, [1], ipformat='::%d') + + +def teardown_module(): + remove_cluster() def validate_ccm_viable(): diff --git a/tests/integration/long/test_large_data.py b/tests/integration/long/test_large_data.py index 
fa4819dfcc..9fe1a6b6fe 100644 --- a/tests/integration/long/test_large_data.py +++ b/tests/integration/long/test_large_data.py @@ -24,10 +24,14 @@ from cassandra.cluster import Cluster from cassandra.query import dict_factory from cassandra.query import SimpleStatement -from tests.integration import PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION from tests.integration.long.utils import create_schema +def setup_module(): + use_singledc() + + # Converts an integer to an string of letters def create_column_name(i): letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'] diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 0263d47d5f..ea1eee78dd 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -21,7 +21,7 @@ TokenAwarePolicy, WhiteListRoundRobinPolicy) from cassandra.query import SimpleStatement -from tests.integration import clear_and_use_multidc, clear_and_use_singledc, PROTOCOL_VERSION +from tests.integration import use_singledc, use_multidc, remove_cluster, PROTOCOL_VERSION from tests.integration.long.utils import (wait_for_up, create_schema, CoordinatorStats, force_stop, wait_for_down, decommission, start, @@ -34,13 +34,15 @@ class LoadBalancingPolicyTests(unittest.TestCase): + def setUp(self): + remove_cluster() # clear ahead of test so it doesn't use one left in unknown state self.coordinator_stats = CoordinatorStats() self.prepared = None @classmethod - def tearDownClass(cls): - clear_and_use_singledc() + def teardown_class(cls): + remove_cluster() def _insert(self, session, keyspace, count=12, consistency_level=ConsistencyLevel.ONE): @@ -67,7 +69,7 @@ def _query(self, session, keyspace, count=12, self.coordinator_stats.add_coordinator(session.execute_async(ss)) def test_roundrobin(self): - clear_and_use_singledc() + use_singledc() keyspace = 'test_roundrobin' cluster = 
Cluster( load_balancing_policy=RoundRobinPolicy(), @@ -108,7 +110,7 @@ def test_roundrobin(self): self.coordinator_stats.assert_query_count_equals(self, 3, 6) def test_roundrobin_two_dcs(self): - clear_and_use_multidc([2, 2]) + use_multidc([2, 2]) keyspace = 'test_roundrobin_two_dcs' cluster = Cluster( load_balancing_policy=RoundRobinPolicy(), @@ -146,7 +148,7 @@ def test_roundrobin_two_dcs(self): self.coordinator_stats.assert_query_count_equals(self, 5, 3) def test_roundrobin_two_dcs_2(self): - clear_and_use_multidc([2, 2]) + use_multidc([2, 2]) keyspace = 'test_roundrobin_two_dcs_2' cluster = Cluster( load_balancing_policy=RoundRobinPolicy(), @@ -184,7 +186,7 @@ def test_roundrobin_two_dcs_2(self): self.coordinator_stats.assert_query_count_equals(self, 5, 3) def test_dc_aware_roundrobin_two_dcs(self): - clear_and_use_multidc([3, 2]) + use_multidc([3, 2]) keyspace = 'test_dc_aware_roundrobin_two_dcs' cluster = Cluster( load_balancing_policy=DCAwareRoundRobinPolicy('dc1'), @@ -207,7 +209,7 @@ def test_dc_aware_roundrobin_two_dcs(self): self.coordinator_stats.assert_query_count_equals(self, 5, 0) def test_dc_aware_roundrobin_two_dcs_2(self): - clear_and_use_multidc([3, 2]) + use_multidc([3, 2]) keyspace = 'test_dc_aware_roundrobin_two_dcs_2' cluster = Cluster( load_balancing_policy=DCAwareRoundRobinPolicy('dc2'), @@ -230,7 +232,7 @@ def test_dc_aware_roundrobin_two_dcs_2(self): self.coordinator_stats.assert_query_count_equals(self, 5, 6) def test_dc_aware_roundrobin_one_remote_host(self): - clear_and_use_multidc([2, 2]) + use_multidc([2, 2]) keyspace = 'test_dc_aware_roundrobin_one_remote_host' cluster = Cluster( load_balancing_policy=DCAwareRoundRobinPolicy('dc2', used_hosts_per_remote_dc=1), @@ -321,7 +323,7 @@ def test_token_aware_prepared(self): self.token_aware(keyspace, True) def token_aware(self, keyspace, use_prepared=False): - clear_and_use_singledc() + use_singledc() cluster = Cluster( load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), 
protocol_version=PROTOCOL_VERSION) @@ -393,7 +395,7 @@ def token_aware(self, keyspace, use_prepared=False): self.coordinator_stats.assert_query_count_equals(self, 2, 0) def test_token_aware_composite_key(self): - clear_and_use_singledc() + use_singledc() keyspace = 'test_token_aware_composite_key' table = 'composite' cluster = Cluster( @@ -422,7 +424,7 @@ def test_token_aware_composite_key(self): self.assertTrue(results[0].i) def test_token_aware_with_rf_2(self, use_prepared=False): - clear_and_use_singledc() + use_singledc() keyspace = 'test_token_aware_with_rf_2' cluster = Cluster( load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), @@ -451,7 +453,7 @@ def test_token_aware_with_rf_2(self, use_prepared=False): self.coordinator_stats.assert_query_count_equals(self, 3, 12) def test_white_list(self): - clear_and_use_singledc() + use_singledc() keyspace = 'test_white_list' cluster = Cluster(('127.0.0.2',), diff --git a/tests/integration/long/test_multi_inserts.py b/tests/integration/long/test_multi_inserts.py index e395bb5700..10422c974b 100755 --- a/tests/integration/long/test_multi_inserts.py +++ b/tests/integration/long/test_multi_inserts.py @@ -6,6 +6,11 @@ import os from cassandra.cluster import Cluster +from tests.integration import use_singledc, PROTOCOL_VERSION + + +def setup_module(): + use_singledc() class StressInsertsTests(unittest.TestCase): @@ -21,7 +26,7 @@ def setUp(self): DROP TABLE IF EXISTS race; CREATE TABLE race (x int PRIMARY KEY); """ - self.cluster=Cluster() + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) self.session = self.cluster.connect('test1rf') ddl1 = ''' @@ -46,9 +51,9 @@ def test_in_flight_is_one(self): The number of inserts can be set through INSERTS_ITERATIONS environmental variable. Default value is 1000000. 
""" - prepared = self.session.prepare ("INSERT INTO race (x) VALUES (?)") + prepared = self.session.prepare("INSERT INTO race (x) VALUES (?)") iterations = int(os.getenv("INSERT_ITERATIONS", 1000000)) - i=0 + i = 0 leaking_connections = False while i < iterations and not leaking_connections: bound = prepared.bind((i,)) @@ -61,6 +66,6 @@ def test_in_flight_is_one(self): print self.session.get_pool_state() leaking_connections = True break - i=i+1 + i = i + 1 self.assertFalse(leaking_connections, 'Detected leaking connection after %s iterations' % i) diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index 3d40713cef..bad7df2cd1 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -17,21 +17,33 @@ from cassandra import ConsistencyLevel, OperationTimedOut from cassandra.cluster import Cluster from cassandra.query import SimpleStatement -from tests.integration import PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION try: import unittest2 as unittest except ImportError: - import unittest # noqa + import unittest # noqa log = logging.getLogger(__name__) +def setup_module(): + use_singledc() + + class SchemaTests(unittest.TestCase): + @classmethod + def setup_class(cls): + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.session = cls.cluster.connect() + + @classmethod + def teardown_class(cls): + cls.cluster.shutdown() + def test_recreates(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() + session = self.session replication_factor = 3 for i in range(2): @@ -66,8 +78,7 @@ def test_recreates(self): session.execute(ss) def test_for_schema_disagreements_different_keyspaces(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() + session = self.session for i in xrange(30): try: @@ -90,7 +101,8 @@ def test_for_schema_disagreements_different_keyspaces(self): 
session.execute(''' DROP KEYSPACE test_%s ''' % i) - except OperationTimedOut: pass + except OperationTimedOut: + pass def test_for_schema_disagreements_same_keyspace(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) @@ -117,4 +129,5 @@ def test_for_schema_disagreements_same_keyspace(self): session.execute(''' DROP KEYSPACE test ''') - except OperationTimedOut: pass + except OperationTimedOut: + pass diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 10574bb5a4..c6090af66a 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -13,14 +13,9 @@ # limitations under the License. import logging -import time -import socket -from ccmlib import common -from ccmlib.cluster import Cluster as CCMCluster - -import tests.integration -from cassandra.cluster import Cluster, NoHostAvailable, Unauthorized, InvalidRequest +from tests.integration import use_singledc, get_cluster, remove_cluster, PROTOCOL_VERSION +from cassandra.cluster import Cluster, NoHostAvailable from cassandra.auth import PlainTextAuthProvider @@ -31,85 +26,13 @@ log = logging.getLogger(__name__) -AUTH_CLUSTER_NAME = 'auth_test_cluster' - - -def wait_for_cassandra(port=7000, maxwait=120): - """ - Wait until there is connection to specified port. Use cassandra port 7000 by default. - :param port: port to try - :param maxwait: maximum wait time in seconds - :return: True if can connect within 2 minutes, False otherwise - """ - sleeptime = 2 - server_address = ('localhost', port) - wait_time = 0 - while wait_time < maxwait: - try: - s = socket.create_connection(server_address) - s.close() - log.debug("Cassandra ready after %d seconds" % wait_time) - return True - except socket.error: - wait_time += sleeptime - time.sleep(sleeptime) - - return False - - -def try_connecting(username='', password=''): - """ - Wait until can connect to cluster. 
- When cluster starts up there is some time while it is not possible to connect to it even though - Cassandra is listening on port 7000. Here we wait until we can actually issue successful Cluster.connect() - method. - :param username: optional user name for connection - :param password: optional password for connection - :return: True if can successfully connect to cluster within 2 minutes, False otherwise - """ - - if username and password: - ap = AuthenticationTests.get_authentication_provider(username, password) - else: - ap = None - - maxwait = 120 # in seconds - sleeptime = 1 - - wait_time = 0 - while wait_time < maxwait: - try: - cluster = Cluster(protocol_version=tests.integration.PROTOCOL_VERSION, auth_provider=ap) - cluster.connect() - log.debug("Can connect after %d seconds" % wait_time) - return True - except Exception: - wait_time += sleeptime - time.sleep(sleeptime) - - return False - - -###################################################### -# Module level setup and teardown -# def setup_module(): - - tests.integration.teardown_package() + use_singledc(start=False) def teardown_module(): - """ - Start generic tests cluster - """ - tests.integration.setup_package() - wait_for_cassandra() - try_connecting() - -# -# End of Module level setup and teardown -#################################################### + remove_cluster() # this test messes with config class AuthenticationTests(unittest.TestCase): @@ -117,96 +40,18 @@ class AuthenticationTests(unittest.TestCase): Tests to cover basic authentication functionality """ - ccm_cluster = None - - password = '12345' - suser = 'new_suser' - test_user = 'test_user' - test_other_user = 'other_user' - - ########################## - # Class level setup and teardown - ## - @classmethod - def setup_class(cls): - """ - Class-level fixture. Called once for all tests. - We create a cluster object here and save it in the provided class instance - Create 2 regular users and a new superuser. 
- Enable authentication by setting 'authenticator': 'PasswordAuthenticator'. - If invoked from authorization class enable authorization by setting 'authorizer': 'CassandraAuthorizer'. - """ - try: - ccm_cluster = CCMCluster.load(tests.integration.path, AUTH_CLUSTER_NAME) - log.debug("Found existing ccm test authentication cluster, removing") - ccm_cluster.remove() - except Exception: - log.debug("Can not load cluster %s ....." % AUTH_CLUSTER_NAME) - - log.debug("Creating new ccm test authentication cluster") - if tests.integration.CASSANDRA_DIR: - ccm_cluster = CCMCluster(tests.integration.path, AUTH_CLUSTER_NAME, cassandra_dir=tests.integration.CASSANDRA_DIR) - else: - ccm_cluster = CCMCluster(tests.integration.path, AUTH_CLUSTER_NAME, cassandra_version=tests.integration.CASSANDRA_VERSION) - - ccm_cluster.set_configuration_options({'start_native_transport': True}) - ccm_cluster.set_configuration_options({'authenticator': 'PasswordAuthenticator'}) - - # - # This method is called either with AuthenticationTests class or with AuthorizedAuthenticationTests class. 
- # In the second case we enable CassandraAuthorizer - # - if cls.__name__ == 'AuthorizedAuthenticationTests': - print "Running tests with Cassandra Authorizer Enabled" - log.info("Running tests with Cassandra Authorizer Enabled") - ccm_cluster.set_configuration_options({'authorizer': 'CassandraAuthorizer'}) - else: - print "Running tests with Cassandra Authorizer Disabled" - log.info("Running tests with Cassandra Authorizer Disabled") - - common.switch_cluster(tests.integration.path, AUTH_CLUSTER_NAME) - ccm_cluster.populate(1) - - log.debug("Starting ccm test authentication cluster") + @staticmethod + def init(config_options): + ccm_cluster = get_cluster() + ccm_cluster.stop() + ccm_cluster.set_configuration_options(config_options) + log.debug("Starting ccm test cluster with %s", config_options) ccm_cluster.start(wait_for_binary_proto=True) - if not wait_for_cassandra() or not try_connecting('cassandra', 'cassandra'): - log.exception("Can not talk to cassandra") - raise Exception('Can not talk to cassandra') - - log.debug("Switched to AUTH_CLUSTER_NAME cluster") - cls.ccm_cluster = ccm_cluster - - cls.root_cluster = cls.cluster_as('cassandra', 'cassandra') - cls.root_session = cls.root_cluster.connect() - cls.create_user(cls.root_cluster, cls.test_user, cls.password) - cls.create_user(cls.root_cluster, cls.test_other_user, cls.password) - cls.create_user(cls.root_cluster, cls.suser, cls.password, su=True) - @classmethod - def tearDownClass(cls): - """ - Remove authentication cluster - """ - cls.ccm_cluster.remove() - - ## - # End of class-level setup and teardown - ######################################################## - - def tearDown(self): - """ - Class method fixture setup. 
Called after each test - Reverse settings for users to test default values - """ - self.session = self.root_cluster.connect() - self.session.execute("ALTER USER %s WITH PASSWORD '%s' SUPERUSER " % (self.suser, self.password)) - self.session.execute("ALTER USER %s WITH PASSWORD '%s' NOSUPERUSER " % (self.test_other_user, self.password)) - self.session.execute("ALTER USER %s WITH PASSWORD '%s' NOSUPERUSER " % (self.test_user, self.password)) - - @classmethod - def enable_authorizer(cls): - cls.authorizer = True + def setup_class(cls): + cls.init({'authenticator': 'PasswordAuthenticator', + 'authorizer': 'AllowAllAuthorizer'}) @staticmethod def v1_authentication_provider(username, password): @@ -224,280 +69,54 @@ def get_authentication_provider(username, password): :param password: authentication password :return: authentication object suitable for Cluster.connect() """ - if tests.integration.PROTOCOL_VERSION < 2: + if PROTOCOL_VERSION < 2: return lambda(hostname): dict(username=username, password=password) else: return PlainTextAuthProvider(username=username, password=password) - @staticmethod - def create_user(cluster, usr, pwd, su=False): - """ - Create user with given username and password and optional status - """ - status = '' - if su: - status = 'SUPERUSER' - session = cluster.connect() - session.execute("CREATE USER %s WITH PASSWORD '%s' %s " % (usr, pwd, status)) - session.shutdown() - - @staticmethod - def list_users(cluster): - """ - List existing cluster users - :param cluster: cluster object - :return: list of user names - """ - return AuthenticationTests.get_user_data(cluster).keys() - - @staticmethod - def get_user_data(cluster): - """ - Get information about users as a dictionary with user name and superuser status - :param cluster: cluster to use - :return: dictionary with user name as key and su status as value - """ - session = cluster.connect() - result = session.execute("LIST USERS") - users = dict() - for row in result: - users[row.name] = 
row.super - session.shutdown() - return users - @staticmethod def cluster_as(usr, pwd): - """ - Create CLuster object authenticated with specified user and password - :rtype : Cluster - :param usr: username - :param pwd: user password - :return: CLuster object - """ - return Cluster(protocol_version=tests.integration.PROTOCOL_VERSION, + return Cluster(protocol_version=PROTOCOL_VERSION, auth_provider=AuthenticationTests.get_authentication_provider(username=usr, password=pwd)) - def test_new_user_created(self): - """ - new_user_created: superuser can create users - """ - self.assertIn(self.test_user, self.list_users(self.root_cluster), 'user %s is missing' % self.test_user) - self.assertIn(self.suser, self.list_users(self.root_cluster), 'user %s is missing' % self.suser) - - def test_user_status(self): - """ - user_status: regular user and superuser have right status in LIST USERS output - """ - users = self.get_user_data(self.root_cluster) - self.assertIn(self.test_user, users.keys()) - self.assertEqual(users[self.test_user], False) - - def test_user_can_connect(self): - """ - user_can_connect: regular user can connect to C* and LIST USERS - """ - cluster = self.cluster_as(self.test_user, self.password) - # Verify that regular user can LIST USERS - self.assertIn(self.test_user, self.list_users(cluster), 'user %s is missing' % self.test_user) + def test_auth_connect(self): + user = 'u' + passwd = 'password' - def test_new_superuser_can_connect(self): - """ - new_superuser_can_connect: new superuser can connect to C* and LIST USERS - """ - # First verify that suser is created - self.assertIn(self.suser, self.list_users(self.root_cluster), 'user %s is missing' % self.suser) - # Now verify that we can connect as suser - cluster = self.cluster_as(self.suser, self.password) - self.assertIn(self.suser, self.list_users(cluster), 'user %s is missing' % self.suser) - - def test_user_cannot_create_user(self): - """ - user_cannot_create_user: regular user can not CREATE USER - 
""" - cluster = self.cluster_as(self.test_user, self.password) - self.assertRaisesRegexp(Unauthorized, - '.*Only superusers are allowed to perform CREATE USER queries.*', - lambda: self.create_user(cluster, 'test_user_1', '12345')) - - def test_user_cannot_change_su_pwd(self): - """ - user_cannot_change_su_pwd: regular user cannot change password for superuser - """ - cluster = self.cluster_as(self.test_user, self.password) - session = cluster.connect() - stmt = "ALTER USER cassandra WITH PASSWORD 'new_su_pwd' " - self.assertRaisesRegexp(Unauthorized, - ".*You aren't allowed to alter this user.*", - lambda: session.execute(stmt)) - - def test_user_cannot_change_other_user_pwd(self): - """ - user_cannot_change_other_user_pwd: regular user cannot change password for other user - """ - cluster = self.cluster_as(self.test_user, self.password) - session = cluster.connect() - stmt = "ALTER USER other_user WITH PASSWORD 'new_reg_pwd' " - self.assertRaisesRegexp(Unauthorized, - ".*You aren't allowed to alter this user.*", - lambda: session.execute(stmt)) + root_session = self.cluster_as('cassandra', 'cassandra').connect() + root_session.execute('CREATE USER %s WITH PASSWORD %s', (user, passwd)) - def test_regular_user_can_change_his_pwd(self): - """ - regular_user_can_change_his_pwd: regular user can change his password and connect to C* with new pwd - and cannot connect with old pwd - """ - cluster = self.cluster_as(self.test_user, self.password) + cluster = self.cluster_as(user, passwd) session = cluster.connect() + self.assertTrue(session.execute('SELECT release_version FROM system.local')) + cluster.shutdown() - new_password = 'AxaXax' - session.execute("ALTER USER %s WITH PASSWORD '%s' " % (self.test_user, new_password)) - session.shutdown() - - # and can connect to the cluster with new password - cluster = self.cluster_as(self.test_user, new_password) - session = cluster.connect() - session.shutdown() - # Should be able to execute some commands - 
self.assertIn(self.test_user, self.list_users(cluster), 'user %s is missing' % self.test_user) + root_session.execute('DROP USER %s', user) + root_session.cluster.shutdown() - # Cannot connect with old pwd - cluster = self.cluster_as(self.test_user, self.password) - self.assertRaisesRegexp(NoHostAvailable, - '.*Bad credentials.*Username and/or ' - 'password are incorrect.*', - cluster.connect) - - def test_su_can_alter_reg_user_status(self): - """ - su_can_alter_reg_user_status: superuser can change regular user status - """ - session = self.root_cluster.connect() - - # Turn test_user into superuser - session.execute("ALTER USER %s WITH PASSWORD '%s' SUPERUSER " % (self.test_user, self.password)) - users = self.get_user_data(self.root_cluster) - self.assertEqual(users[self.test_user], True, msg="%s has SUPERUSER set to %s" - % (self.test_user, users[self.test_user])) - - def test_su_can_alter_other_su_status(self): - """ - su_can_alter_other_su_status: superuser can change other superuser status - """ - session = self.root_cluster.connect() - - # Turn suser into regular user - session.execute("ALTER USER %s WITH PASSWORD '%s' NOSUPERUSER " % (self.suser, self.password)) - users = self.get_user_data(self.root_cluster) - self.assertEqual(users[self.suser], False, msg="%s has SUPERUSER set to %s" % (self.suser, users[self.suser])) - - def test_su_cannot_alter_his_status(self): - """ - su_cannot_alter_his_status: superuser can't change his status - """ - session = self.root_cluster.connect() - stmt = "ALTER USER cassandra WITH PASSWORD 'cassandra' NOSUPERUSER " - self.assertRaisesRegexp(Unauthorized, - ".*You aren't allowed to alter your own superuser status.*", - lambda: session.execute(stmt)) - - def test_su_can_change_her_pwd(self): - """ - su_can_change_her_pwd: superuser can change her password and connect to C* with new pwd - and cannot connect with old pwd - """ - cluster = self.cluster_as(self.suser, self.password) - session = cluster.connect() - - new_password 
= 'AxaXax' - session.execute("ALTER USER %s WITH PASSWORD '%s' " % (self.suser, new_password)) - session.shutdown() - - # and can connect to the cluster with new password - cluster = self.cluster_as(self.suser, new_password) - session = cluster.connect() - # Should be able to execute some commands - self.assertIn(self.test_user, self.list_users(cluster), 'user %s is missing' % self.suser) - session.shutdown() - # Cannot connect with old pwd - cluster = self.cluster_as(self.suser, self.password) - self.assertRaisesRegexp(NoHostAvailable, - '.*Bad credentials.*Username and/or ' - 'password are incorrect.*', - cluster.connect) - - def test_su_can_drop_other_superuser(self): - """ - su_can_drop_other_superuser: superuser can drop other superuser - """ - session = self.root_cluster.connect() - self.create_user(self.root_cluster, 'temp_su_user', self.password, su=True) - session.execute("DROP USER temp_su_user") - self.assertNotIn('temp_du_user', self.list_users(self.root_cluster), 'user temp_su_user NOT dropped') - - def test_su_can_drop_regular_user(self): - """ - su_can_drop_regular_user: superuser can drop regular user - """ - session = self.root_cluster.connect() - self.create_user(self.root_cluster, 'temp_reg_user', self.password) - session.execute("DROP USER temp_reg_user") - self.assertNotIn('temp_reg_user', self.list_users(self.root_cluster), 'user temp_reg_user NOT dropped') - - def test_su_cannot_drop_herself(self): - """ - su_cannot_drop_herself: superuser can't drop herself - """ - self.create_user(self.root_cluster, 'temp_su_user', self.password, su=True) - cluster = self.cluster_as('temp_su_user', self.password) - self.session = cluster.connect() - stmt = "DROP USER temp_su_user" - self.assertRaisesRegexp(InvalidRequest, - ".*Users aren't allowed to DROP themselves.*", - lambda: self.session.execute(stmt)) - - def test_su_connect_wrong_pwd(self): - """ - su_connect_wrong_pwd: can't connect with bad password - Verifies that trying to connect with wrong 
password triggers - "AuthenticationFailed /Bad credentials / Username and/or - password are incorrect" - """ - cluster = self.cluster_as('cassandra', 'who_is_cassandra') + def test_connect_wrong_pwd(self): + cluster = self.cluster_as('cassandra', 'wrong_pass') self.assertRaisesRegexp(NoHostAvailable, '.*AuthenticationFailed.*Bad credentials.*Username and/or ' 'password are incorrect.*', cluster.connect) - def test_su_connect_wrong_username(self): - """ - su_connect_wrong_username: can't connect with bad username - Try to connect with wrong superuser name fails with: "AuthenticationFailed /Bad credentials / Username and/or - password are incorrect" - """ - cluster = self.cluster_as('Cassandra', 'cassandra') + def test_connect_wrong_username(self): + cluster = self.cluster_as('wrong_user', 'cassandra') self.assertRaisesRegexp(NoHostAvailable, '.*AuthenticationFailed.*Bad credentials.*Username and/or ' 'password are incorrect.*', cluster.connect) - def test_su_connect_empty_pwd(self): - """ - su_connect_empty_pwd: can't connect with an empty password - Empty superuser password triggers "AuthenticationFailed /Bad credentials / - Username and/or password are incorrect" - """ + def test_connect_empty_pwd(self): cluster = self.cluster_as('Cassandra', '') self.assertRaisesRegexp(NoHostAvailable, '.*AuthenticationFailed.*Bad credentials.*Username and/or ' 'password are incorrect.*', cluster.connect) - def test_su_connect_no_user_specified(self): - """ - su_connect_no_user_specified: can't connect with an empty user - No user specified triggers "AuthenticationFailed /Remote end requires authentication - """ - cluster = Cluster(protocol_version=tests.integration.PROTOCOL_VERSION) + def test_connect_no_auth_provider(self): + cluster = Cluster(protocol_version=PROTOCOL_VERSION) self.assertRaisesRegexp(NoHostAvailable, '.*AuthenticationFailed.*Remote end requires authentication.*', cluster.connect) @@ -507,4 +126,8 @@ class AuthorizedAuthenticationTests(AuthenticationTests): """ 
Same test as AuthenticationTests but enables authorization """ - pass + + @classmethod + def setup_class(cls): + cls.init({'authenticator': 'PasswordAuthenticator', + 'authorizer': 'CassandraAuthorizer'}) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 1c485f4180..957a421434 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -12,12 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -from tests.integration import PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION try: import unittest2 as unittest except ImportError: - import unittest # noqa + import unittest # noqa import cassandra from cassandra.query import SimpleStatement, TraceUnavailable @@ -26,6 +26,10 @@ from cassandra.cluster import Cluster, NoHostAvailable +def setup_module(): + use_singledc() + + class ClusterTests(unittest.TestCase): def test_basic(self): diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index 78e005a8b5..211c44206d 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from tests.integration import PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION try: import unittest2 as unittest @@ -29,6 +29,10 @@ from cassandra.query import tuple_factory, SimpleStatement +def setup_module(): + use_singledc() + + class ClusterTests(unittest.TestCase): def setUp(self): @@ -103,7 +107,7 @@ def test_execute_concurrent_paged_result(self): statement = SimpleStatement( "SELECT * FROM test3rf.test LIMIT %s", consistency_level=ConsistencyLevel.QUORUM, - fetch_size=int(num_statements/2)) + fetch_size=int(num_statements / 2)) parameters = [(i, ) for i in range(num_statements)] results = execute_concurrent_with_args(self.session, statement, [(num_statements,)]) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index a0181486dc..d17f692a7f 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -12,12 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from tests.integration import PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION try: import unittest2 as unittest except ImportError: - import unittest # noqa + import unittest # noqa from functools import partial from six.moves import range @@ -35,6 +35,10 @@ LibevConnection = None +def setup_module(): + use_singledc() + + class ConnectionTests(object): klass = None diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 0899e859a9..ec6485fc08 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -16,7 +16,7 @@ try: import unittest2 as unittest except ImportError: - import unittest # noqa + import unittest # noqa from mock import Mock @@ -28,7 +28,12 @@ from cassandra.policies import SimpleConvictionPolicy from cassandra.pool import Host -from tests.integration import get_cluster, PROTOCOL_VERSION, get_server_versions +from tests.integration import (get_cluster, use_singledc, PROTOCOL_VERSION, + get_server_versions) + + +def setup_module(): + use_singledc() class SchemaMetadataTests(unittest.TestCase): @@ -342,6 +347,11 @@ def test_export_keyspace_schema_udts(self): if get_server_versions()[0] < (2, 1, 0): raise unittest.SkipTest('UDTs were introduced in Cassandra 2.1') + if PROTOCOL_VERSION < 3: + raise unittest.SkipTest( + "Protocol 3.0+ is required for UDT change events, currently testing against %r" + % (PROTOCOL_VERSION,)) + cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() @@ -503,8 +513,8 @@ def test_replicas(self): self.assertNotEqual(list(cluster.metadata.get_replicas('test3rf', 'key')), []) host = list(cluster.metadata.get_replicas('test3rf', 'key'))[0] - self.assertEqual(host.datacenter, 'datacenter1') - self.assertEqual(host.rack, 'rack1') + self.assertEqual(host.datacenter, 'dc1') + self.assertEqual(host.rack, 'r1') def test_token_map(self): """ diff --git 
a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 9793aa228c..828ed6000c 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -17,13 +17,17 @@ try: import unittest2 as unittest except ImportError: - import unittest # noqa + import unittest # noqa from cassandra.query import SimpleStatement from cassandra import ConsistencyLevel, WriteTimeout, Unavailable, ReadTimeout from cassandra.cluster import Cluster, NoHostAvailable -from tests.integration import get_node, get_cluster, PROTOCOL_VERSION +from tests.integration import get_cluster, get_node, use_singledc, PROTOCOL_VERSION + + +def setup_module(): + use_singledc() class MetricsTests(unittest.TestCase): diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 3957c175cb..e91e0edefb 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -13,18 +13,22 @@ # limitations under the License. 
-from tests.integration import PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION try: import unittest2 as unittest except ImportError: - import unittest # noqa + import unittest # noqa from cassandra import InvalidRequest from cassandra.cluster import Cluster from cassandra.query import PreparedStatement +def setup_module(): + use_singledc() + + class PreparedStatementTests(unittest.TestCase): def test_basic(self): diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index e8728cd332..c0db307555 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -27,7 +27,11 @@ from cassandra.cluster import Cluster from cassandra.policies import HostDistance -from tests.integration import PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION + + +def setup_module(): + use_singledc() class QueryTests(unittest.TestCase): @@ -396,7 +400,6 @@ def test_no_connection_refused_on_timeout(self): delete_statement = self.session.prepare("DELETE FROM test3rf.lwt WHERE k = 0 IF EXISTS") iterations = int(os.getenv("LWT_ITERATIONS", 1000)) - print("Started test for %d iterations" % iterations) # Prepare series of parallel statements statements_and_params = [] @@ -414,7 +417,6 @@ def test_no_connection_refused_on_timeout(self): self.fail("PYTHON-91: Disconnected from Cassandra: %s" % result.message) break if type(result).__name__ == "WriteTimeout": - print("Timeout: %s" % result.message) received_timeout = True continue self.fail("Unexpected exception %s: %s" % (type(result).__name__, result.message)) diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index 9bc0d9237f..e634f36a42 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # 
limitations under the License. -from tests.integration import PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION import logging log = logging.getLogger(__name__) @@ -20,7 +20,7 @@ try: import unittest2 as unittest except ImportError: - import unittest # noqa + import unittest # noqa from itertools import cycle, count from six.moves import range @@ -32,6 +32,10 @@ from cassandra.query import SimpleStatement +def setup_module(): + use_singledc() + + class QueryPagingTests(unittest.TestCase): def setUp(self): diff --git a/tests/integration/standard/test_row_factories.py b/tests/integration/standard/test_row_factories.py index c697003a1f..354021f257 100644 --- a/tests/integration/standard/test_row_factories.py +++ b/tests/integration/standard/test_row_factories.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from tests.integration import get_server_versions, PROTOCOL_VERSION +from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION try: import unittest2 as unittest @@ -24,6 +24,10 @@ from cassandra.util import OrderedDict +def setup_module(): + use_singledc() + + class RowFactoryTests(unittest.TestCase): """ Test different row_factories and access code diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 553085f6e8..fb985fa7dc 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -32,7 +32,11 @@ from cassandra.query import dict_factory from cassandra.util import OrderedDict, sortedset -from tests.integration import get_server_versions, PROTOCOL_VERSION +from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION + + +def setup_module(): + use_singledc() class TypeTests(unittest.TestCase): diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index ae5ce5ed2e..f8af72bd1f 100644 --- 
a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -25,11 +25,15 @@ from cassandra.cluster import Cluster, UserTypeDoesNotExist -from tests.integration import get_server_versions, PROTOCOL_VERSION +from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION from tests.integration.datatype_utils import get_sample, get_nonprim_sample,\ DATA_TYPE_PRIMITIVES, DATA_TYPE_NON_PRIMITIVE_NAMES +def setup_module(): + use_singledc() + + class TypeTests(unittest.TestCase): def setUp(self): @@ -330,7 +334,7 @@ def test_nested_registered_udts(self): if self._cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - MAX_NESTING_DEPTH = 128 + MAX_NESTING_DEPTH = 16 c = Cluster(protocol_version=PROTOCOL_VERSION) s = c.connect() @@ -392,7 +396,7 @@ def test_nested_unregistered_udts(self): if self._cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - MAX_NESTING_DEPTH = 128 + MAX_NESTING_DEPTH = 16 c = Cluster(protocol_version=PROTOCOL_VERSION) s = c.connect() @@ -452,13 +456,13 @@ def test_nested_registered_udts_with_different_namedtuples(self): created namedtuples are use names that are different the cql type. Future improvement: optimize these three related tests using a single - helper method to cut down on code reuse. + helper method to cut down on code repetition. 
""" if self._cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - MAX_NESTING_DEPTH = 128 + MAX_NESTING_DEPTH = 16 c = Cluster(protocol_version=PROTOCOL_VERSION) s = c.connect() From 56267768f292c3fb9d9366e8361f548f6ec1c993 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 12 Dec 2014 16:30:43 -0600 Subject: [PATCH 1080/3726] Add logging statements to trace cluster changes in integration tests --- tests/integration/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 094ccb3c8e..fee7980904 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -132,10 +132,12 @@ def is_current_cluster(cluster_name, node_counts): def use_cluster(cluster_name, nodes, ipformat=None, start=True): if is_current_cluster(cluster_name, nodes): + log.debug("Using existing cluster %s", cluster_name) return global CCM_CLUSTER if CCM_CLUSTER: + log.debug("Stopping cluster %s", CCM_CLUSTER.name) CCM_CLUSTER.stop() try: From e316560582032350f772552a494edf100b0011e7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 15 Dec 2014 12:15:28 -0600 Subject: [PATCH 1081/3726] Make create statement in test_udt_sizes use timeout --- tests/integration/standard/test_udts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index f8af72bd1f..87e76b3559 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -297,7 +297,7 @@ def test_udt_sizes(self): # no need for all nested types, only a spot checked few and the largest one s.execute("CREATE TABLE mytable (" "k int PRIMARY KEY, " - "v frozen)") + "v frozen)", timeout=EXTENDED_QUERY_TIMEOUT) # create and register the seed udt type udt = namedtuple('lengthy_udt', tuple(['v_{}'.format(i) for i in range(MAX_TEST_LENGTH)])) From 
9c8d7737db2df5d2bc59909e00ea653686344485 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 15 Dec 2014 14:27:10 -0600 Subject: [PATCH 1082/3726] TokenAware statement keyspace tests. PYTHON-181 --- tests/unit/test_parameter_binding.py | 22 +++++++++++ tests/unit/test_policies.py | 56 ++++++++++++++++++++++++++++ 2 files changed, 78 insertions(+) diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index 3137195b42..efb7ff1aed 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -128,3 +128,25 @@ def test_inherit_fetch_size(self): fetch_size=1234) bound_statement = BoundStatement(prepared_statement=prepared_statement) self.assertEqual(1234, bound_statement.fetch_size) + + def test_too_few_parameters_for_key(self): + keyspace = 'keyspace1' + column_family = 'cf1' + + column_metadata = [ + (keyspace, column_family, 'foo1', Int32Type), + (keyspace, column_family, 'foo2', Int32Type) + ] + + prepared_statement = PreparedStatement(column_metadata=column_metadata, + query_id=None, + routing_key_indexes=[0, 1], + query=None, + keyspace=keyspace, + protocol_version=2, + fetch_size=1234) + + self.assertRaises(ValueError, prepared_statement.bind, (1,)) + + bound = prepared_statement.bind((1,2)) + self.assertEqual(bound.keyspace, keyspace) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index ab098f3926..bd8426cd50 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -520,6 +520,62 @@ def test_status_updates(self): self.assertEqual(qplan, []) + def test_statement_keyspace(self): + hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)] + for host in hosts: + host.set_up() + + cluster = Mock(spec=Cluster) + cluster.metadata = Mock(spec=Metadata) + replicas = hosts[2:] + cluster.metadata.get_replicas.return_value = replicas + + child_policy = Mock() + child_policy.make_query_plan.return_value = hosts + child_policy.distance.return_value = 
HostDistance.LOCAL + + policy = TokenAwarePolicy(child_policy) + policy.populate(cluster, hosts) + + # no keyspace, child policy is called + keyspace = None + routing_key = 'routing_key' + query = Statement(routing_key=routing_key) + qplan = list(policy.make_query_plan(keyspace, query)) + self.assertEqual(hosts, qplan) + self.assertEqual(cluster.metadata.get_replicas.call_count, 0) + child_policy.make_query_plan.assert_called_once_with(keyspace, query) + + # working keyspace, no statement + cluster.metadata.get_replicas.reset_mock() + keyspace = 'working_keyspace' + routing_key = 'routing_key' + query = Statement(routing_key=routing_key) + qplan = list(policy.make_query_plan(keyspace, query)) + self.assertEqual(replicas + hosts[:2], qplan) + cluster.metadata.get_replicas.assert_called_with(keyspace, routing_key) + + # statement keyspace, no working + cluster.metadata.get_replicas.reset_mock() + working_keyspace = None + statement_keyspace = 'statement_keyspace' + routing_key = 'routing_key' + query = Statement(routing_key=routing_key, keyspace=statement_keyspace) + qplan = list(policy.make_query_plan(working_keyspace, query)) + self.assertEqual(replicas + hosts[:2], qplan) + cluster.metadata.get_replicas.assert_called_with(statement_keyspace, routing_key) + + # both keyspaces set, statement keyspace used for routing + cluster.metadata.get_replicas.reset_mock() + working_keyspace = 'working_keyspace' + statement_keyspace = 'statement_keyspace' + routing_key = 'routing_key' + query = Statement(routing_key=routing_key, keyspace=statement_keyspace) + qplan = list(policy.make_query_plan(working_keyspace, query)) + self.assertEqual(replicas + hosts[:2], qplan) + cluster.metadata.get_replicas.assert_called_with(statement_keyspace, routing_key) + + class ConvictionPolicyTest(unittest.TestCase): def test_not_implemented(self): """ From 40d9e3f26f12be7e5635e34691d08d2476a81080 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 15 Dec 2014 15:05:43 -0600 Subject: [PATCH 
1083/3726] Put a few more checks around test_metadata keyspace alteration test PYTHON-173 --- tests/integration/standard/test_metadata.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 82d9d09a57..f351f04751 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -577,7 +577,7 @@ def test_getting_replicas(self): self.assertEqual(set(replicas), set([expected_host])) -class TableAlterMetadata(unittest.TestCase): +class KeyspaceAlterMetadata(unittest.TestCase): """ Test verifies that table metadata is preserved on keyspace alter """ @@ -601,11 +601,17 @@ def test_keyspace_alter(self): Verify schema Alter ks Verify that table metadata is still present + + PYTHON-173 """ name = self._testMethodName.lower() self.session.execute('CREATE TABLE %s.d (d INT PRIMARY KEY)' % name) - self.assertTrue(self.cluster.metadata.keyspaces[name].tables) + original_keyspace_meta = self.cluster.metadata.keyspaces[name] + self.assertEqual(original_keyspace_meta.durable_writes, True) + self.assertEqual(len(original_keyspace_meta.tables), 1) self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' %name) - self.assertTrue(self.cluster.metadata.keyspaces[name].tables) + new_keyspace_meta = self.cluster.metadata.keyspaces[name] + self.assertNotEqual(original_keyspace_meta, new_keyspace_meta) + self.assertEqual(new_keyspace_meta.durable_writes, False) From 21d49a6645639f2b8c8504eb82bcc160ab8ea9e7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 15 Dec 2014 16:05:22 -0600 Subject: [PATCH 1084/3726] Handle schema meta when column meta missing workaround for CASSANDRA-8487 --- cassandra/metadata.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 910aad9bda..bc1d8e1ab4 100644 --- a/cassandra/metadata.py +++ 
b/cassandra/metadata.py @@ -119,10 +119,12 @@ def rebuild_schema(self, ks_results, type_results, cf_results, col_results, trig current_keyspaces = set() for row in ks_results: keyspace_meta = self._build_keyspace_metadata(row) + keyspace_col_rows = col_def_rows.get(keyspace_meta.name, {}) + keyspace_trigger_rows = trigger_rows.get(keyspace_meta.name, {}) for table_row in cf_def_rows.get(keyspace_meta.name, []): table_meta = self._build_table_metadata( - keyspace_meta, table_row, col_def_rows[keyspace_meta.name], - trigger_rows.get(keyspace_meta.name, {})) + keyspace_meta, table_row, keyspace_col_rows, + keyspace_trigger_rows) keyspace_meta.tables[table_meta.name] = table_meta for usertype_row in usertype_rows.get(keyspace_meta.name, []): @@ -217,6 +219,10 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): cfname = row["columnfamily_name"] cf_col_rows = col_rows.get(cfname, []) + if not cf_col_rows: # CASSANDRA-8487 + log.warning("Building table metadata with no column meta for %s.%s", + keyspace_metadata.name, cfname) + comparator = types.lookup_casstype(row["comparator"]) if issubclass(comparator, types.CompositeType): column_name_types = comparator.subtypes @@ -322,20 +328,20 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): value_alias = "value" else: value_alias = row.get("value_alias", None) - if value_alias is None: + if value_alias is None and value_alias_rows: # CASSANDRA-8487 # In 2.0+, we can use the 'type' column. In 3.0+, we have to use it. 
- assert len(value_alias_rows) == 1 value_alias = value_alias_rows[0].get('column_name') default_validator = row.get("default_validator") if default_validator: validator = types.lookup_casstype(default_validator) else: - assert len(value_alias_rows) == 1 - validator = types.lookup_casstype(value_alias_rows[0].get('validator')) + if value_alias_rows: # CASSANDRA-8487 + validator = types.lookup_casstype(value_alias_rows[0].get('validator')) col = ColumnMetadata(table_meta, value_alias, validator) - table_meta.columns[value_alias] = col + if value_alias: # CASSANDRA-8487 + table_meta.columns[value_alias] = col # other normal columns for col_row in cf_col_rows: From ddf0eb9d10dd58de0f2b9f50de7815ce346b3c7b Mon Sep 17 00:00:00 2001 From: Irina Kaplounova Date: Tue, 16 Dec 2014 01:13:17 -0800 Subject: [PATCH 1085/3726] Test for PYTHON-180 --- tests/unit/test_exception.py | 56 ++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 tests/unit/test_exception.py diff --git a/tests/unit/test_exception.py b/tests/unit/test_exception.py new file mode 100644 index 0000000000..f882c537db --- /dev/null +++ b/tests/unit/test_exception.py @@ -0,0 +1,56 @@ +# Copyright 2014 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +try: + import unittest2 as unittest +except ImportError: + import unittest + +from cassandra import Unavailable, Timeout, ConsistencyLevel +import re + + +class ConsistencyExceptionTest(unittest.TestCase): + """ + Verify Cassandra Exception string representation + """ + + def extract_consistency(self, msg): + """ + Given message that has 'consistency': 'value', extract consistency value as a string + :param msg: message with consistency value + :return: String representing consistency value + """ + match = re.search("'consistency':\s+'([\w\s]+)'", msg) + return match and match.group(1) + + def test_timeout_consistency(self): + """ + Verify that Timeout exception object translates consistency from input value to correct output string + """ + consistency_str = self.extract_consistency(repr(Timeout("Timeout Message", consistency=None))) + self.assertEqual(consistency_str, 'Not Set') + for c in ConsistencyLevel.value_to_name.keys(): + consistency_str = self.extract_consistency(repr(Timeout("Timeout Message", consistency=c))) + self.assertEqual(consistency_str, ConsistencyLevel.value_to_name[c]) + + def test_unavailable_consistency(self): + """ + Verify that Unavailable exception object translates consistency from input value to correct output string + """ + consistency_str = self.extract_consistency(repr(Unavailable("Unavailable Message", consistency=None))) + self.assertEqual(consistency_str, 'Not Set') + for c in ConsistencyLevel.value_to_name.keys(): + consistency_str = self.extract_consistency(repr(Unavailable("Timeout Message", consistency=c))) + self.assertEqual(consistency_str, ConsistencyLevel.value_to_name[c]) From 524e71795489e94cf7c2361f027c965e9b19541b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 16 Dec 2014 09:24:57 -0600 Subject: [PATCH 1086/3726] Routing key test including compound keys. 
PYTHON-184 --- tests/integration/__init__.py | 3 + tests/integration/standard/test_routing.py | 97 ++++++++++++++++++++++ 2 files changed, 100 insertions(+) create mode 100644 tests/integration/standard/test_routing.py diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index fee7980904..339254a25d 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -165,6 +165,9 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True): def teardown_package(): + # when multiple modules are run explicitly, this runs between them + # need to make sure CCM_CLUSTER is properly cleared for that case + remove_cluster() for cluster_name in [CLUSTER_NAME, MULTIDC_CLUSTER_NAME]: try: cluster = CCMClusterFactory.load(path, cluster_name) diff --git a/tests/integration/standard/test_routing.py b/tests/integration/standard/test_routing.py new file mode 100644 index 0000000000..9d09cc98f4 --- /dev/null +++ b/tests/integration/standard/test_routing.py @@ -0,0 +1,97 @@ +# Copyright 2013-2014 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from uuid import uuid1 + +import logging +log = logging.getLogger(__name__) + +from cassandra.cluster import Cluster + +from tests.integration import use_singledc, PROTOCOL_VERSION + + +def setup_module(): + use_singledc() + + +class RoutingTests(unittest.TestCase): + + @property + def cfname(self): + return self._testMethodName.lower() + + @classmethod + def setup_class(cls): + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.session = cls.cluster.connect('test1rf') + + @classmethod + def teardown_class(cls): + cls.cluster.shutdown() + + def insert_select_token(self, insert, select, key_values): + s = self.session + + bound = insert.bind(key_values) + s.execute(bound) + + my_token = s.cluster.metadata.token_map.token_class.from_key(bound.routing_key) + + cass_token = s.execute(select, key_values)[0][0] + token = s.cluster.metadata.token_map.token_class(cass_token) + self.assertEqual(my_token, token) + + def create_prepare(self, key_types): + s = self.session + table_name = "%s_%s" % (self.cfname, '_'.join(key_types)) + key_count = len(key_types) + key_decl = ', '.join(['k%d %s' % (i, t) for i, t in enumerate(key_types)]) + primary_key = ', '.join(['k%d' % i for i in range(key_count)]) + s.execute("CREATE TABLE %s (%s, v int, PRIMARY KEY ((%s)))" % + (table_name, key_decl, primary_key)) + + parameter_places = ', '.join(['?'] * key_count) + insert = s.prepare("INSERT INTO %s (%s, v) VALUES (%s, 1)" % + (table_name, primary_key, parameter_places)) + + where_clause = ' AND '.join(['k%d = ?' 
% i for i in range(key_count)]) + select = s.prepare("SELECT token(%s) FROM %s WHERE %s" % + (primary_key, table_name, where_clause)) + + return (insert, select) + + def test_singular_key(self): + # string + insert, select = self.create_prepare(('text',)) + self.insert_select_token(insert, select, ('some text value',)) + + # non-string + insert, select = self.create_prepare(('bigint',)) + self.insert_select_token(insert, select, (12390890177098123,)) + + def test_composite(self): + # double bool + insert, select = self.create_prepare(('double', 'boolean')) + self.insert_select_token(insert, select, (3.1459, True)) + self.insert_select_token(insert, select, (1.21e9, False)) + + # uuid string int + insert, select = self.create_prepare(('timeuuid', 'varchar', 'int')) + self.insert_select_token(insert, select, (uuid1(), 'asdf', 400)) + self.insert_select_token(insert, select, (uuid1(), 'fdsa', -1)) From 632f1c1cfb3b26a30a81a23a77550dd95d63b5fa Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 16 Dec 2014 10:30:10 -0600 Subject: [PATCH 1087/3726] Clarifiy registered types user_defined_types.rst --- docs/user_defined_types.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/user_defined_types.rst b/docs/user_defined_types.rst index 08347fb1a8..714b4f0650 100644 --- a/docs/user_defined_types.rst +++ b/docs/user_defined_types.rst @@ -85,6 +85,8 @@ for the UDT: As shown in the code example, inserting data for UDT columns without registering a class works fine for prepared statements. However, **you must register a -class to insert UDT columns with unprepared statements**. You can still query +class to insert UDT columns with unprepared statements** *. You can still query UDT columns without registered classes using unprepared statements, they will simply return ``namedtuple`` instances (just like prepared statements do). 
+ +\* this applies to *parameterized* unprepared statements, in which the driver will be formatting parameters -- not statements with interpolated UDT literals. From 6483ba61e4ae789bead2e410f96e514fe9a50079 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 16 Dec 2014 10:41:37 -0600 Subject: [PATCH 1088/3726] Escape asterisk user_defined_types.rst --- docs/user_defined_types.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/user_defined_types.rst b/docs/user_defined_types.rst index 714b4f0650..2310689d36 100644 --- a/docs/user_defined_types.rst +++ b/docs/user_defined_types.rst @@ -85,7 +85,7 @@ for the UDT: As shown in the code example, inserting data for UDT columns without registering a class works fine for prepared statements. However, **you must register a -class to insert UDT columns with unprepared statements** *. You can still query +class to insert UDT columns with unprepared statements**.\* You can still query UDT columns without registered classes using unprepared statements, they will simply return ``namedtuple`` instances (just like prepared statements do). 
From 9ad8c97a0ab37e74ac5f5288ab4a547024dc4717 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 16 Dec 2014 11:51:01 -0600 Subject: [PATCH 1089/3726] changelog update --- CHANGELOG.rst | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4974cbd0a0..67182f02e3 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,27 @@ +2.1.3 +===== +December 16, 2014 + +Features +-------- +* INFO-level log confirmation that a connection was opened to a node that was marked up (PYTHON-116) +* Avoid connecting to peer with incomplete metadata (PYTHON-163) +* Add SSL support to gevent reactor (PYTHON-174) +* Use control connection timeout in wait for schema agreement (PYTHON-175) +* Better consistency level representation in unavailable+timeout exceptions (PYTHON-180) +* Update schema metadata processing to accommodate coming schema modernization (PYTHON-185) + +Bug Fixes +--------- +* Support large negative timestamps on Windows (PYTHON-119) +* Fix schema agreement for clusters with peer rpc_addres 0.0.0.0 (PYTHON-166) +* Retain table metadata following keyspace meta refresh (PYTHON-173) +* Use a timeout when preparing a statement for all nodes (PYTHON-179) +* Make TokenAware routing tolerant of statements with no keyspace (PYTHON-181) +* Update add_collback to store/invoke multiple callbacks (PYTHON-182) +* Correct routing key encoding for composite keys (PYTHON-184) +* Include compression option in schema export string when disabled (PYTHON-187) + 2.1.2 ===== October 16, 2014 From e577577f1c1ac3d4da18086f9c669b21e1569fed Mon Sep 17 00:00:00 2001 From: Jeremiah Jordan Date: Thu, 11 Dec 2014 13:47:34 -0600 Subject: [PATCH 1090/3726] PYTHON-191 Add support for custom replication strategies. 
--- cassandra/metadata.py | 87 ++++++++++++++++++++++++++++--------- tests/unit/test_metadata.py | 20 +++------ 2 files changed, 72 insertions(+), 35 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index ec9062a712..e967d6ea46 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -425,25 +425,40 @@ def all_hosts(self): with self._hosts_lock: return self._hosts.values() - -class ReplicationStrategy(object): +REPLICATION_STRATEGY_CLASS_PREFIX = "org.apache.cassandra.locator." +def trim_if_startswith(s, prefix): + if s.startswith(prefix): + return s[len(prefix):] + return s + +_replication_strategies = {} +class ReplicationStrategyTypeType(type): + def __new__(metacls, name, bases, dct): + dct.setdefault('name', name) + cls = type.__new__(metacls, name, bases, dct) + if not name.startswith('_'): + _replication_strategies[name] = cls + return cls + + +@six.add_metaclass(ReplicationStrategyTypeType) +class _ReplicationStrategy(object): + options_map = None @classmethod def create(cls, strategy_class, options_map): if not strategy_class: return None - if strategy_class.endswith("OldNetworkTopologyStrategy"): - return None - elif strategy_class.endswith("NetworkTopologyStrategy"): - return NetworkTopologyStrategy(options_map) - elif strategy_class.endswith("SimpleStrategy"): - repl_factor = options_map.get('replication_factor', None) - if not repl_factor: - return None - return SimpleStrategy(repl_factor) - elif strategy_class.endswith("LocalStrategy"): - return LocalStrategy() + schema_name = trim_if_startswith(strategy_class, REPLICATION_STRATEGY_CLASS_PREFIX) + + rs_class = _replication_strategies.get(schema_name, None) + if rs_class is None: + rs_class = _UnknownStrategyBuilder(schema_name) + _replication_strategies[schema_name] = rs_class + + rs_instance = rs_class(options_map) + return rs_instance def make_token_replica_map(self, token_to_host_owner, ring): raise NotImplementedError() @@ -452,16 +467,50 @@ def 
export_for_schema(self): raise NotImplementedError() -class SimpleStrategy(ReplicationStrategy): +ReplicationStrategy=_ReplicationStrategy - name = "SimpleStrategy" + +class _UnknownStrategyBuilder(object): + def __init__(self, name): + self.name = name + + def __call__(self, options_map): + strategy_instance = _UnknownStrategy(self.name, options_map) + return strategy_instance + + +class _UnknownStrategy(ReplicationStrategy): + def __init__(self, name, options_map): + self.name = name + self.options_map = options_map.copy() if options_map is not None else dict() + self.options_map['class'] = self.name + + def __eq__(self, other): + return (isinstance(other, _UnknownStrategy) + and self.name == other.name + and self.options_map == other.options_map) + + def export_for_schema(self): + """ + Returns a string version of these replication options which are + suitable for use in a CREATE KEYSPACE statement. + """ + if self.options_map: + return dict((str(key), str(value)) for key, value in self.options_map.items()) + return "{'class': '%s'}" % (self.name, ) + + +class SimpleStrategy(ReplicationStrategy): replication_factor = None """ The replication factor for this keyspace. """ - def __init__(self, replication_factor): + def __init__(self, options_map): + replication_factor = options_map.get('replication_factor', None) if isinstance(options_map, dict) else options_map + if not isinstance(replication_factor, int): + raise TypeError("replication_factor must be an int") self.replication_factor = int(replication_factor) def make_token_replica_map(self, token_to_host_owner, ring): @@ -495,8 +544,6 @@ def __eq__(self, other): class NetworkTopologyStrategy(ReplicationStrategy): - name = "NetworkTopologyStrategy" - dc_replication_factors = None """ A map of datacenter names to the replication factor for that DC. 
@@ -579,8 +626,8 @@ def __eq__(self, other): class LocalStrategy(ReplicationStrategy): - - name = "LocalStrategy" + def __init__(self, options_map): + None def make_token_replica_map(self, token_to_host_owner, ring): return {} diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index a4d6af0ba4..ab52404eed 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -26,7 +26,8 @@ NetworkTopologyStrategy, SimpleStrategy, LocalStrategy, NoMurmur3, protect_name, protect_names, protect_value, is_valid_name, - UserType, KeyspaceMetadata, Metadata) + UserType, KeyspaceMetadata, Metadata, + _UnknownStrategy) from cassandra.policies import SimpleConvictionPolicy from cassandra.pool import Host @@ -47,36 +48,25 @@ def test_replication_strategy(self): rs = ReplicationStrategy() - self.assertEqual(rs.create('OldNetworkTopologyStrategy', None), None) - self.assertEqual(rs.create('xxxxxxOldNetworkTopologyStrategy', None), None) + self.assertEqual(rs.create('OldNetworkTopologyStrategy', None), _UnknownStrategy('OldNetworkTopologyStrategy', None)) fake_options_map = {'dc1': '3'} self.assertIsInstance(rs.create('NetworkTopologyStrategy', fake_options_map), NetworkTopologyStrategy) self.assertEqual(rs.create('NetworkTopologyStrategy', fake_options_map).dc_replication_factors, NetworkTopologyStrategy(fake_options_map).dc_replication_factors) - self.assertIsInstance(rs.create('xxxxxxNetworkTopologyStrategy', fake_options_map), NetworkTopologyStrategy) - self.assertEqual(rs.create('xxxxxxNetworkTopologyStrategy', fake_options_map).dc_replication_factors, - NetworkTopologyStrategy(fake_options_map).dc_replication_factors) - fake_options_map = {'options': 'map'} - self.assertEqual(rs.create('SimpleStrategy', fake_options_map), None) - self.assertEqual(rs.create('xxxxxxSimpleStrategy', fake_options_map), None) + self.assertRaises(TypeError, rs.create, 'SimpleStrategy', fake_options_map) fake_options_map = {'options': 'map'} 
self.assertIsInstance(rs.create('LocalStrategy', fake_options_map), LocalStrategy) - self.assertIsInstance(rs.create('xxxxxxLocalStrategy', fake_options_map), LocalStrategy) fake_options_map = {'options': 'map', 'replication_factor': 3} self.assertIsInstance(rs.create('SimpleStrategy', fake_options_map), SimpleStrategy) self.assertEqual(rs.create('SimpleStrategy', fake_options_map).replication_factor, SimpleStrategy(fake_options_map['replication_factor']).replication_factor) - self.assertIsInstance(rs.create('xxxxxxSimpleStrategy', fake_options_map), SimpleStrategy) - self.assertEqual(rs.create('xxxxxxSimpleStrategy', fake_options_map).replication_factor, - SimpleStrategy(fake_options_map['replication_factor']).replication_factor) - - self.assertEqual(rs.create('xxxxxxxx', fake_options_map), None) + self.assertEqual(rs.create('xxxxxxxx', fake_options_map), _UnknownStrategy('xxxxxxxx', fake_options_map)) self.assertRaises(NotImplementedError, rs.make_token_replica_map, None, None) self.assertRaises(NotImplementedError, rs.export_for_schema) From d9e90740b93163fbf4633da2c98fb86e3feb6626 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 16 Dec 2014 21:26:30 +0000 Subject: [PATCH 1091/3726] Release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 9a710c601e..286d82253b 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (2, 1, 2, 'post') +__version_info__ = (2, 1, 3) __version__ = '.'.join(map(str, __version_info__)) From f1163a9a9aac4ab5de937533fc9693b58e7da121 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 16 Dec 2014 21:32:41 +0000 Subject: [PATCH 1092/3726] post release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 
286d82253b..93c53dcce4 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (2, 1, 3) +__version_info__ = (2, 1, 3, 'post') __version__ = '.'.join(map(str, __version_info__)) From 1726f6a0cb0b764dc5af927d6b1a613ea21fb863 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 18 Dec 2014 14:07:26 -0600 Subject: [PATCH 1093/3726] SaslAuth docstring update, additional ctor guard copy/paste error not sure on release version yet --- cassandra/auth.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/cassandra/auth.py b/cassandra/auth.py index 0d50851408..77c5bcf985 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -130,12 +130,12 @@ def evaluate_challenge(self, challenge): class SaslAuthProvider(AuthProvider): """ - An :class:`~.AuthProvider` that works with DSE's KerberosAuthenticator. + An :class:`~.AuthProvider` for Kerberos authenticators Example usage:: from cassandra.cluster import Cluster - from cassandra.auth import PlainTextAuthProvider + from cassandra.auth import SaslAuthProvider sasl_kwargs = {'host': 'localhost', 'service': 'dse', @@ -144,13 +144,13 @@ class SaslAuthProvider(AuthProvider): auth_provider = SaslAuthProvider(**sasl_kwargs) cluster = Cluster(auth_provider=auth_provider) - .. versionadded:: 2.1.4 + .. versionadded:: 2.1.3-post """ def __init__(self, **sasl_kwargs): - self.sasl_kwargs = sasl_kwargs if SASLClient is None: raise ImportError('The puresasl library has not been installed') + self.sasl_kwargs = sasl_kwargs def new_authenticator(self, host): return SaslAuthenticator(**self.sasl_kwargs) @@ -159,10 +159,12 @@ class SaslAuthenticator(Authenticator): """ An :class:`~.Authenticator` that works with DSE's KerberosAuthenticator. - .. versionadded:: 2.1.4 + .. 
versionadded:: 2.1.3-post """ def __init__(self, host, service, mechanism='GSSAPI', **sasl_kwargs): + if SASLClient is None: + raise ImportError('The puresasl library has not been installed') self.sasl = SASLClient(host, service, mechanism, **sasl_kwargs) def initial_response(self): From cf330df53cf1eff04520cbd21f69aa8c8329dc1d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 18 Dec 2014 14:08:57 -0600 Subject: [PATCH 1094/3726] Added SaslAuth test for #215 PYTHON-109 --- .../standard/test_authentication.py | 56 +++++++++---------- 1 file changed, 27 insertions(+), 29 deletions(-) diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index c6090af66a..08832ecd0f 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -16,7 +16,7 @@ from tests.integration import use_singledc, get_cluster, remove_cluster, PROTOCOL_VERSION from cassandra.cluster import Cluster, NoHostAvailable -from cassandra.auth import PlainTextAuthProvider +from cassandra.auth import PlainTextAuthProvider, SASLClient, SaslAuthProvider try: @@ -29,6 +29,13 @@ def setup_module(): use_singledc(start=False) + ccm_cluster = get_cluster() + ccm_cluster.stop() + config_options = {'authenticator': 'PasswordAuthenticator', + 'authorizer': 'CassandraAuthorizer'} + ccm_cluster.set_configuration_options(config_options) + log.debug("Starting ccm test cluster with %s", config_options) + ccm_cluster.start(wait_for_binary_proto=True) def teardown_module(): @@ -40,25 +47,7 @@ class AuthenticationTests(unittest.TestCase): Tests to cover basic authentication functionality """ - @staticmethod - def init(config_options): - ccm_cluster = get_cluster() - ccm_cluster.stop() - ccm_cluster.set_configuration_options(config_options) - log.debug("Starting ccm test cluster with %s", config_options) - ccm_cluster.start(wait_for_binary_proto=True) - - @classmethod - def setup_class(cls): - 
cls.init({'authenticator': 'PasswordAuthenticator', - 'authorizer': 'AllowAllAuthorizer'}) - - @staticmethod - def v1_authentication_provider(username, password): - return dict(username=username, password=password) - - @staticmethod - def get_authentication_provider(username, password): + def get_authentication_provider(self, username, password): """ Return correct authentication provider based on protocol version. There is a difference in the semantics of authentication provider argument with protocol versions 1 and 2 @@ -74,10 +63,9 @@ def get_authentication_provider(username, password): else: return PlainTextAuthProvider(username=username, password=password) - @staticmethod - def cluster_as(usr, pwd): + def cluster_as(self, usr, pwd): return Cluster(protocol_version=PROTOCOL_VERSION, - auth_provider=AuthenticationTests.get_authentication_provider(username=usr, password=pwd)) + auth_provider=self.get_authentication_provider(username=usr, password=pwd)) def test_auth_connect(self): user = 'u' @@ -122,12 +110,22 @@ def test_connect_no_auth_provider(self): cluster.connect) -class AuthorizedAuthenticationTests(AuthenticationTests): +class SaslAuthenticatorTests(AuthenticationTests): """ - Same test as AuthenticationTests but enables authorization + Test SaslAuthProvider as PlainText """ - @classmethod - def setup_class(cls): - cls.init({'authenticator': 'PasswordAuthenticator', - 'authorizer': 'CassandraAuthorizer'}) + def setUp(self): + if PROTOCOL_VERSION < 2: + raise unittest.SkipTest('Sasl authentication not available for protocol v1') + if SASLClient is None: + raise unittest.SkipTest('pure-sasl is not installed') + + def get_authentication_provider(self, username, password): + sasl_kwargs = {'host': 'localhost', + 'service': 'cassandra', + 'mechanism': 'PLAIN', + 'qops': ['auth'], + 'username': username, + 'password': password} + return SaslAuthProvider(**sasl_kwargs) From fd2db2f9011843fdd3b57f5cb78bf5af805f92a3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg 
Date: Thu, 18 Dec 2014 15:29:10 -0600 Subject: [PATCH 1095/3726] Remove int type requirement, revert TypeError in favor of None return also update tests --- cassandra/metadata.py | 15 ++++++++++----- tests/unit/test_metadata.py | 12 ++++++------ 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index e967d6ea46..d6925fcf85 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -457,7 +457,12 @@ def create(cls, strategy_class, options_map): rs_class = _UnknownStrategyBuilder(schema_name) _replication_strategies[schema_name] = rs_class - rs_instance = rs_class(options_map) + try: + rs_instance = rs_class(options_map) + except Exception as exc: + log.warn("Failed creating %s with options %s: %s", schema_name, options_map, exc.message) + return None + return rs_instance def make_token_replica_map(self, token_to_host_owner, ring): @@ -508,10 +513,10 @@ class SimpleStrategy(ReplicationStrategy): """ def __init__(self, options_map): - replication_factor = options_map.get('replication_factor', None) if isinstance(options_map, dict) else options_map - if not isinstance(replication_factor, int): - raise TypeError("replication_factor must be an int") - self.replication_factor = int(replication_factor) + try: + self.replication_factor = int(options_map['replication_factor']) + except Exception: + raise ValueError("SimpleStrategy requires an integer 'replication_factor' option") def make_token_replica_map(self, token_to_host_owner, ring): replica_map = {} diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index ab52404eed..bdb7d5f1e0 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -56,7 +56,7 @@ def test_replication_strategy(self): NetworkTopologyStrategy(fake_options_map).dc_replication_factors) fake_options_map = {'options': 'map'} - self.assertRaises(TypeError, rs.create, 'SimpleStrategy', fake_options_map) + self.assertIsNone(rs.create('SimpleStrategy', 
fake_options_map)) fake_options_map = {'options': 'map'} self.assertIsInstance(rs.create('LocalStrategy', fake_options_map), LocalStrategy) @@ -64,7 +64,7 @@ def test_replication_strategy(self): fake_options_map = {'options': 'map', 'replication_factor': 3} self.assertIsInstance(rs.create('SimpleStrategy', fake_options_map), SimpleStrategy) self.assertEqual(rs.create('SimpleStrategy', fake_options_map).replication_factor, - SimpleStrategy(fake_options_map['replication_factor']).replication_factor) + SimpleStrategy(fake_options_map).replication_factor) self.assertEqual(rs.create('xxxxxxxx', fake_options_map), _UnknownStrategy('xxxxxxxx', fake_options_map)) @@ -132,23 +132,23 @@ def test_simple_strategy_make_token_replica_map(self): } ring = [MD5Token(0), MD5Token(100), MD5Token(200)] - rf1_replicas = SimpleStrategy(1).make_token_replica_map(token_to_host_owner, ring) + rf1_replicas = SimpleStrategy({'replication_factor': '1'}).make_token_replica_map(token_to_host_owner, ring) self.assertItemsEqual(rf1_replicas[MD5Token(0)], [host1]) self.assertItemsEqual(rf1_replicas[MD5Token(100)], [host2]) self.assertItemsEqual(rf1_replicas[MD5Token(200)], [host3]) - rf2_replicas = SimpleStrategy(2).make_token_replica_map(token_to_host_owner, ring) + rf2_replicas = SimpleStrategy({'replication_factor': '2'}).make_token_replica_map(token_to_host_owner, ring) self.assertItemsEqual(rf2_replicas[MD5Token(0)], [host1, host2]) self.assertItemsEqual(rf2_replicas[MD5Token(100)], [host2, host3]) self.assertItemsEqual(rf2_replicas[MD5Token(200)], [host3, host1]) - rf3_replicas = SimpleStrategy(3).make_token_replica_map(token_to_host_owner, ring) + rf3_replicas = SimpleStrategy({'replication_factor': '3'}).make_token_replica_map(token_to_host_owner, ring) self.assertItemsEqual(rf3_replicas[MD5Token(0)], [host1, host2, host3]) self.assertItemsEqual(rf3_replicas[MD5Token(100)], [host2, host3, host1]) self.assertItemsEqual(rf3_replicas[MD5Token(200)], [host3, host1, host2]) def 
test_ss_equals(self): - self.assertNotEqual(SimpleStrategy(1), NetworkTopologyStrategy({'dc1': 2})) + self.assertNotEqual(SimpleStrategy({'replication_factor': '1'}), NetworkTopologyStrategy({'dc1': 2})) class NameEscapingTest(unittest.TestCase): From e52ed1e60e91d846cc3f1ca329466c0d5bbc97e8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 18 Dec 2014 16:13:54 -0600 Subject: [PATCH 1096/3726] Don't use Exception.message in warn log (python3) --- cassandra/metadata.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index d6925fcf85..159d941d91 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -425,13 +425,19 @@ def all_hosts(self): with self._hosts_lock: return self._hosts.values() + REPLICATION_STRATEGY_CLASS_PREFIX = "org.apache.cassandra.locator." + + def trim_if_startswith(s, prefix): if s.startswith(prefix): return s[len(prefix):] return s + _replication_strategies = {} + + class ReplicationStrategyTypeType(type): def __new__(metacls, name, bases, dct): dct.setdefault('name', name) @@ -460,7 +466,7 @@ def create(cls, strategy_class, options_map): try: rs_instance = rs_class(options_map) except Exception as exc: - log.warn("Failed creating %s with options %s: %s", schema_name, options_map, exc.message) + log.warn("Failed creating %s with options %s: %s", schema_name, options_map, exc) return None return rs_instance @@ -472,7 +478,7 @@ def export_for_schema(self): raise NotImplementedError() -ReplicationStrategy=_ReplicationStrategy +ReplicationStrategy = _ReplicationStrategy class _UnknownStrategyBuilder(object): @@ -492,8 +498,8 @@ def __init__(self, name, options_map): def __eq__(self, other): return (isinstance(other, _UnknownStrategy) - and self.name == other.name - and self.options_map == other.options_map) + and self.name == other.name + and self.options_map == other.options_map) def export_for_schema(self): """ @@ -632,7 +638,7 @@ def __eq__(self, 
other): class LocalStrategy(ReplicationStrategy): def __init__(self, options_map): - None + pass def make_token_replica_map(self, token_to_host_owner, ring): return {} From 509ac79712cd4c6a85518c5a533fc9d450dc679d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 22 Dec 2014 11:48:08 -0600 Subject: [PATCH 1097/3726] Correct meta logic for composite comparators, compact storage PYTHON-192 --- cassandra/metadata.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index bc1d8e1ab4..f9bc6831a9 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -224,12 +224,13 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): keyspace_metadata.name, cfname) comparator = types.lookup_casstype(row["comparator"]) + if issubclass(comparator, types.CompositeType): column_name_types = comparator.subtypes - is_composite = True + is_composite_comparator = True else: column_name_types = (comparator,) - is_composite = False + is_composite_comparator = False num_column_name_components = len(column_name_types) last_col = column_name_types[-1] @@ -246,7 +247,7 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): else: column_aliases = [r.get('column_name') for r in clustering_rows] - if is_composite: + if is_composite_comparator: if issubclass(last_col, types.ColumnToCollectionType): # collections is_compact = False @@ -261,7 +262,7 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): else: # compact table is_compact = True - has_value = True + has_value = column_aliases or not cf_col_rows clustering_size = num_column_name_components else: is_compact = True From 61b4fad6995c6b1810cb5c04b443ef0378abacd4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 22 Dec 2014 16:00:20 -0600 Subject: [PATCH 1098/3726] Prevent NIE when unknown repl strategy with TA LBP Unknown replication strategies would raise if token 
aware load balancing used. Also fixes a more general issue where TokenAware blows up with strategies returning an empty mapping (i.e. LocalStrategy, and Unknown). --- cassandra/metadata.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 159d941d91..d0b200aa7f 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -510,6 +510,9 @@ def export_for_schema(self): return dict((str(key), str(value)) for key, value in self.options_map.items()) return "{'class': '%s'}" % (self.name, ) + def make_token_replica_map(self, token_to_host_owner, ring): + return {} + class SimpleStrategy(ReplicationStrategy): @@ -1210,7 +1213,7 @@ def get_replicas(self, keyspace, token): if tokens_to_hosts is None: self.rebuild_keyspace(keyspace, build_if_absent=True) tokens_to_hosts = self.tokens_to_hosts_by_ks.get(keyspace, None) - if tokens_to_hosts is None: + if not tokens_to_hosts: return [] # token range ownership is exclusive on the LHS (the start token), so From dca76010ed6322f4e8d079aa97e779714d031578 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 22 Dec 2014 16:03:07 -0600 Subject: [PATCH 1099/3726] Improved var name in ReplicationStrategy review input --- cassandra/metadata.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index d0b200aa7f..80c8c0e4a5 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -456,17 +456,17 @@ def create(cls, strategy_class, options_map): if not strategy_class: return None - schema_name = trim_if_startswith(strategy_class, REPLICATION_STRATEGY_CLASS_PREFIX) + strategy_name = trim_if_startswith(strategy_class, REPLICATION_STRATEGY_CLASS_PREFIX) - rs_class = _replication_strategies.get(schema_name, None) + rs_class = _replication_strategies.get(strategy_name, None) if rs_class is None: - rs_class = _UnknownStrategyBuilder(schema_name) - _replication_strategies[schema_name] = 
rs_class + rs_class = _UnknownStrategyBuilder(strategy_name) + _replication_strategies[strategy_name] = rs_class try: rs_instance = rs_class(options_map) except Exception as exc: - log.warn("Failed creating %s with options %s: %s", schema_name, options_map, exc) + log.warn("Failed creating %s with options %s: %s", strategy_name, options_map, exc) return None return rs_instance From 55ae64c1f0508b35d2dcfa217784068ef1733cd9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 22 Dec 2014 16:40:00 -0600 Subject: [PATCH 1100/3726] Changelog update for interim tickets. --- CHANGELOG.rst | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 67182f02e3..aaa67c8e5e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,15 @@ +2.1.3-post +========== + +Features +-------- +* SaslAuthenticator for Kerberos support (PYTHON-109) + +Bug Fixes +--------- +* Schema meta fix for complex thrift tables (PYTHON-191) +* Support for 'unknown' replica placement strategies in schema meta (PYTHON-192) + 2.1.3 ===== December 16, 2014 From 584bf0204cd7c8dd772a7b84419cc668e040aae6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 23 Dec 2014 15:38:27 -0600 Subject: [PATCH 1101/3726] Add test for variations on tables created with cli PYTHON-192 --- tests/integration/standard/test_metadata.py | 246 ++++++++++++++++++++ 1 file changed, 246 insertions(+) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index f351f04751..f8988fecab 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -535,6 +535,252 @@ def test_token_map(self): self.assertEqual(set(get_replicas('test2rf', token)), set([owners[(i + 1) % 3], owners[(i + 2) % 3]])) self.assertEqual(set(get_replicas('test1rf', token)), set([owners[(i + 1) % 3]])) + def test_legacy_tables(self): + + cli_script = """CREATE KEYSPACE legacy +WITH placement_strategy = 'SimpleStrategy' +AND 
strategy_options = {replication_factor:1}; + +USE legacy; + +CREATE COLUMN FAMILY simple_no_col + WITH comparator = UTF8Type + AND key_validation_class = UUIDType + AND default_validation_class = UTF8Type; + +CREATE COLUMN FAMILY simple_with_col + WITH comparator = UTF8Type + and key_validation_class = UUIDType + and default_validation_class = UTF8Type + AND column_metadata = [ + {column_name: col_with_meta, validation_class: UTF8Type} + ]; + +CREATE COLUMN FAMILY composite_partition_no_col + WITH comparator = UTF8Type + AND key_validation_class = 'CompositeType(UUIDType,UTF8Type)' + AND default_validation_class = UTF8Type; + +CREATE COLUMN FAMILY composite_partition_with_col + WITH comparator = UTF8Type + AND key_validation_class = 'CompositeType(UUIDType,UTF8Type)' + AND default_validation_class = UTF8Type + AND column_metadata = [ + {column_name: col_with_meta, validation_class: UTF8Type} + ]; + +CREATE COLUMN FAMILY nested_composite_key + WITH comparator = UTF8Type + and key_validation_class = 'CompositeType(CompositeType(UUIDType,UTF8Type), LongType)' + and default_validation_class = UTF8Type + AND column_metadata = [ + {column_name: full_name, validation_class: UTF8Type} + ]; + +create column family composite_comp_no_col + with column_type = 'Standard' + and comparator = 'DynamicCompositeType(t=>org.apache.cassandra.db.marshal.TimeUUIDType,s=>org.apache.cassandra.db.marshal.UTF8Type,b=>org.apache.cassandra.db.marshal.BytesType)' + and default_validation_class = 'BytesType' + and key_validation_class = 'BytesType' + and read_repair_chance = 0.0 + and dclocal_read_repair_chance = 0.1 + and gc_grace = 864000 + and min_compaction_threshold = 4 + and max_compaction_threshold = 32 + and compaction_strategy = 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' + and caching = 'KEYS_ONLY' + and cells_per_row_to_cache = '0' + and default_time_to_live = 0 + and speculative_retry = 'NONE' + and comment = 'Stores file meta data'; + +create column family 
composite_comp_with_col + with column_type = 'Standard' + and comparator = 'DynamicCompositeType(t=>org.apache.cassandra.db.marshal.TimeUUIDType,s=>org.apache.cassandra.db.marshal.UTF8Type,b=>org.apache.cassandra.db.marshal.BytesType)' + and default_validation_class = 'BytesType' + and key_validation_class = 'BytesType' + and read_repair_chance = 0.0 + and dclocal_read_repair_chance = 0.1 + and gc_grace = 864000 + and min_compaction_threshold = 4 + and max_compaction_threshold = 32 + and compaction_strategy = 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' + and caching = 'KEYS_ONLY' + and cells_per_row_to_cache = '0' + and default_time_to_live = 0 + and speculative_retry = 'NONE' + and comment = 'Stores file meta data' + and column_metadata = [ + {column_name : 'b@6d616d6d616a616d6d61', + validation_class : BytesType, + index_name : 'idx_one', + index_type : 0}, + {column_name : 'b@6869746d65776974686d75736963', + validation_class : BytesType, + index_name : 'idx_two', + index_type : 0}] + and compression_options = {'sstable_compression' : 'org.apache.cassandra.io.compress.LZ4Compressor'};""" + + # note: the innerlkey type for legacy.nested_composite_key + # (org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType, org.apache.cassandra.db.marshal.UTF8Type)) + # is a bit strange, but it replays in CQL with desired results + expected_string = """CREATE KEYSPACE legacy WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true; + +CREATE TABLE legacy.composite_comp_with_col ( + key blob, + column0 't=>org.apache.cassandra.db.marshal.TimeUUIDType', + column1 'b=>org.apache.cassandra.db.marshal.BytesType', + column2 's=>org.apache.cassandra.db.marshal.UTF8Type', + "b@6869746d65776974686d75736963" blob, + "b@6d616d6d616a616d6d61" blob, + PRIMARY KEY (key, column0, column1, column2) +) WITH COMPACT STORAGE + AND CLUSTERING ORDER BY (column0 ASC, column1 ASC, column2 ASC) + AND 
caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' + AND comment = 'Stores file meta data' + AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} + AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = 'NONE'; +CREATE INDEX idx_two ON legacy.composite_comp_with_col ("b@6869746d65776974686d75736963"); +CREATE INDEX idx_one ON legacy.composite_comp_with_col ("b@6d616d6d616a616d6d61"); + +CREATE TABLE legacy.nested_composite_key ( + key 'org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType, org.apache.cassandra.db.marshal.UTF8Type)', + key2 bigint, + full_name text, + PRIMARY KEY ((key, key2)) +) WITH COMPACT STORAGE + AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' + AND comment = '' + AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} + AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = 'NONE'; + +CREATE TABLE legacy.composite_partition_with_col ( + key uuid, + key2 text, + col_with_meta text, + PRIMARY KEY ((key, key2)) +) WITH COMPACT STORAGE + AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' + AND comment = '' + AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': 
'32'} + AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = 'NONE'; + +CREATE TABLE legacy.composite_partition_no_col ( + key uuid, + key2 text, + column1 text, + value text, + PRIMARY KEY ((key, key2), column1) +) WITH COMPACT STORAGE + AND CLUSTERING ORDER BY (column1 ASC) + AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' + AND comment = '' + AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} + AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = 'NONE'; + +CREATE TABLE legacy.simple_with_col ( + key uuid PRIMARY KEY, + col_with_meta text +) WITH COMPACT STORAGE + AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' + AND comment = '' + AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} + AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = 'NONE'; + +CREATE TABLE legacy.simple_no_col ( + key uuid, + column1 text, + value text, + PRIMARY KEY (key, column1) +) WITH COMPACT STORAGE + AND 
CLUSTERING ORDER BY (column1 ASC) + AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' + AND comment = '' + AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} + AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = 'NONE'; + +CREATE TABLE legacy.composite_comp_no_col ( + key blob, + column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(t=>org.apache.cassandra.db.marshal.TimeUUIDType, b=>org.apache.cassandra.db.marshal.BytesType, s=>org.apache.cassandra.db.marshal.UTF8Type)', + column2 's=>org.apache.cassandra.db.marshal.UTF8Type', + value blob, + PRIMARY KEY (key, column1, column1, column2) +) WITH COMPACT STORAGE + AND CLUSTERING ORDER BY (column1 ASC, column1 ASC, column2 ASC) + AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' + AND comment = 'Stores file meta data' + AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} + AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = 'NONE';""" + + ccm = get_cluster() + ccm.run_cli(cli_script) + + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + session = cluster.connect() + + legacy_meta = cluster.metadata.keyspaces['legacy'] + + self.assertEqual(legacy_meta.export_as_string(), expected_string) + + session.execute('DROP KEYSPACE 
legacy') + + cluster.shutdown() class TokenMetadataTest(unittest.TestCase): """ From 0d9bf65691657fefe30c3c216ff9eb82921f0472 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 23 Dec 2014 15:55:10 -0600 Subject: [PATCH 1102/3726] expand test for UnknownReplicationStrategy --- tests/unit/test_metadata.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index bdb7d5f1e0..9099037369 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -49,6 +49,10 @@ def test_replication_strategy(self): rs = ReplicationStrategy() self.assertEqual(rs.create('OldNetworkTopologyStrategy', None), _UnknownStrategy('OldNetworkTopologyStrategy', None)) + fake_options_map = {'options': 'map'} + uks = rs.create('OldNetworkTopologyStrategy', fake_options_map) + self.assertEqual(uks, _UnknownStrategy('OldNetworkTopologyStrategy', fake_options_map)) + self.assertEqual(uks.make_token_replica_map({}, []), {}) fake_options_map = {'dc1': '3'} self.assertIsInstance(rs.create('NetworkTopologyStrategy', fake_options_map), NetworkTopologyStrategy) From f739fda8bc35fe17f33441252b58abd7b5a903b0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 23 Dec 2014 16:17:35 -0600 Subject: [PATCH 1103/3726] Add a test for TokenAware against a local table. 
Fixed in 61b4fad, would previously blow up with KeyError --- .../integration/long/test_loadbalancingpolicies.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index ea1eee78dd..8bb9e3caa0 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -452,6 +452,19 @@ def test_token_aware_with_rf_2(self, use_prepared=False): self.coordinator_stats.assert_query_count_equals(self, 2, 0) self.coordinator_stats.assert_query_count_equals(self, 3, 12) + def test_token_aware_with_local_table(self): + use_singledc() + cluster = Cluster( + load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), + protocol_version=PROTOCOL_VERSION) + session = cluster.connect() + + p = session.prepare("SELECT * FROM system.local WHERE key=?") + # this would blow up prior to 61b4fad + r = session.execute(p, ('local',)) + self.assertEqual(len(r), 1) + self.assertEqual(r[0].key, 'local') + def test_white_list(self): use_singledc() keyspace = 'test_white_list' From d6b8a57d5f10e80a73d5aef2e10c92182f7fec10 Mon Sep 17 00:00:00 2001 From: Amy Hanlon Date: Sat, 27 Dec 2014 16:00:15 -0500 Subject: [PATCH 1104/3726] bump cassandra version to 1.2 in puppet manifest --- manifests/default.pp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/manifests/default.pp b/manifests/default.pp index 3c5ad95642..af718e13e1 100644 --- a/manifests/default.pp +++ b/manifests/default.pp @@ -33,14 +33,14 @@ exec {"download-cassandra": cwd => "/tmp", - command => "wget http://download.nextag.com/apache/cassandra/1.1.6/apache-cassandra-1.1.6-bin.tar.gz", - creates => "/tmp/apache-cassandra-1.1.6-bin.tar.gz", + command => "wget http://download.nextag.com/apache/cassandra/1.2.19/apache-cassandra-1.2.19-bin.tar.gz", + creates => "/tmp/apache-cassandra-1.2.19-bin.tar.gz", require => [Package["wget"], 
File["/etc/init/cassandra.conf"]] } exec {"install-cassandra": cwd => "/tmp", - command => "tar -xzf apache-cassandra-1.1.6-bin.tar.gz; mv apache-cassandra-1.1.6 /usr/local/cassandra", + command => "tar -xzf apache-cassandra-1.2.19-bin.tar.gz; mv apache-cassandra-1.2.19 /usr/local/cassandra", require => Exec["download-cassandra"], creates => "/usr/local/cassandra/bin/cassandra" } From 54311fae5b903ec29ba5a68675a7e04f9c6694aa Mon Sep 17 00:00:00 2001 From: Amy Hanlon Date: Sat, 27 Dec 2014 16:43:14 -0500 Subject: [PATCH 1105/3726] fix codeblock formatting --- docs/topics/development.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/topics/development.rst b/docs/topics/development.rst index cc5c6de73f..b3a4112507 100644 --- a/docs/topics/development.rst +++ b/docs/topics/development.rst @@ -30,15 +30,15 @@ Testing Locally Before testing, you'll need to set an environment variable to the version of Cassandra that's being tested. The version cooresponds to the release, so for example if you're testing against Cassandra 2.1, you'd set the following: - .. code-block::bash +.. code-block:: bash - export CASSANDRA_VERSION=20 + export CASSANDRA_VERSION=20 At the command line, execute: - .. code-block::bash +.. code-block:: bash - bin/test.py + bin/test.py This is a wrapper for nose that also sets up the database connection. 
From 13208895444eea7ff3abf66f3138774d1994b9b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Furma=C5=84ski?= Date: Sun, 28 Dec 2014 22:56:19 +0100 Subject: [PATCH 1106/3726] Fix travis gpg error This commit fixes failing travis build as advised here: https://github.com/travis-ci/travis-ci/issues/2934#issuecomment-61806248 --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 96044d1e57..dd4379e258 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,6 +12,7 @@ python: before_install: - sudo echo "deb http://www.apache.org/dist/cassandra/debian ${CASSANDRA_VERSION}x main" | sudo tee -a /etc/apt/sources.list - sudo echo "deb-src http://www.apache.org/dist/cassandra/debian ${CASSANDRA_VERSION}x main" | sudo tee -a /etc/apt/sources.list + - sudo rm -rf ~/.gnupg - sudo gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D - sudo gpg --export --armor F758CE318D77295D | sudo apt-key add - - sudo gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00 From a74c44501f2a46f468bcecea15fc889c6d4a2185 Mon Sep 17 00:00:00 2001 From: amygdalama Date: Mon, 22 Dec 2014 22:47:56 -0500 Subject: [PATCH 1107/3726] add test for assigning an empty set --- .../statements/test_assignment_clauses.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/cqlengine/tests/statements/test_assignment_clauses.py b/cqlengine/tests/statements/test_assignment_clauses.py index 32da18b228..982688a055 100644 --- a/cqlengine/tests/statements/test_assignment_clauses.py +++ b/cqlengine/tests/statements/test_assignment_clauses.py @@ -65,6 +65,24 @@ def test_no_update(self): c.update_context(ctx) self.assertEqual(ctx, {}) + def test_update_empty_set(self): + """tests assigning a set to an empty set creates a nonempty + update statement and nonzero context size.""" + c = SetUpdateClause(field='s', value=set()) + c._analyze() + c.set_context_id(0) + + self.assertEqual(c._assignments, set()) + self.assertIsNone(c._additions) + 
self.assertIsNone(c._removals) + + self.assertEqual(c.get_context_size(), 1) + self.assertEqual(str(c), '"s" = %(0)s') + + ctx = {} + c.update_context(ctx) + self.assertEqual(ctx, {'0' : set()}) + def test_additions(self): c = SetUpdateClause('s', {1, 2, 3}, previous={1, 2}) c._analyze() From f4873ed6244c1787313a15f6ce21bf9c9a11647f Mon Sep 17 00:00:00 2001 From: amygdalama Date: Mon, 22 Dec 2014 22:52:05 -0500 Subject: [PATCH 1108/3726] return a context size of 1 when assigning the empty set --- cqlengine/statements.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/statements.py b/cqlengine/statements.py index 4a6f318ffc..b044c86c7a 100644 --- a/cqlengine/statements.py +++ b/cqlengine/statements.py @@ -218,7 +218,7 @@ def _analyze(self): def get_context_size(self): if not self._analyzed: self._analyze() if (self.previous is None and - self._assignments is None and + not self._assignments and self._additions is None and self._removals is None): return 1 From 849b569bb6e6a558c9ffacd69393d39e4c302bc4 Mon Sep 17 00:00:00 2001 From: Amy Hanlon Date: Sun, 28 Dec 2014 17:51:58 -0500 Subject: [PATCH 1109/3726] add setup_package for running nosetests --- cqlengine/tests/__init__.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/cqlengine/tests/__init__.py b/cqlengine/tests/__init__.py index e69de29bb2..87cb239575 100644 --- a/cqlengine/tests/__init__.py +++ b/cqlengine/tests/__init__.py @@ -0,0 +1,25 @@ +import os + +from cqlengine import connection + +def setup_package(): + try: + CASSANDRA_VERSION = int(os.environ["CASSANDRA_VERSION"]) + except: + print("CASSANDRA_VERSION must be set as an environment variable. 
" + "One of (12, 20, 21)") + raise + + if os.environ.get('CASSANDRA_TEST_HOST'): + CASSANDRA_TEST_HOST = os.environ['CASSANDRA_TEST_HOST'] + else: + CASSANDRA_TEST_HOST = 'localhost' + + if CASSANDRA_VERSION < 20: + protocol_version = 1 + else: + protocol_version = 2 + + connection.setup([CASSANDRA_TEST_HOST], + protocol_version=protocol_version, + default_keyspace='cqlengine_test') From 3d06858b8058ffb600e5e1612b1e21459ef1b18a Mon Sep 17 00:00:00 2001 From: Amy Hanlon Date: Sun, 4 Jan 2015 17:56:17 -0500 Subject: [PATCH 1110/3726] use a constant to get protocol_version instead of module-level calls to get_cluster --- cqlengine/tests/base.py | 3 +++ cqlengine/tests/columns/test_static_column.py | 9 ++++----- cqlengine/tests/management/test_management.py | 8 ++++---- cqlengine/tests/query/test_queryset.py | 8 +++----- cqlengine/tests/test_ifnotexists.py | 8 +++----- 5 files changed, 17 insertions(+), 19 deletions(-) diff --git a/cqlengine/tests/base.py b/cqlengine/tests/base.py index dea0cc191b..5bd66b648c 100644 --- a/cqlengine/tests/base.py +++ b/cqlengine/tests/base.py @@ -4,7 +4,10 @@ import six from cqlengine.connection import get_session + CASSANDRA_VERSION = int(os.environ['CASSANDRA_VERSION']) +PROTOCOL_VERSION = 1 if CASSANDRA_VERSION < 20 else 2 + class BaseCassEngTestCase(TestCase): diff --git a/cqlengine/tests/columns/test_static_column.py b/cqlengine/tests/columns/test_static_column.py index 34c553dc89..e8cfdedb9a 100644 --- a/cqlengine/tests/columns/test_static_column.py +++ b/cqlengine/tests/columns/test_static_column.py @@ -4,10 +4,9 @@ from cqlengine import columns from cqlengine.management import sync_table, drop_table from cqlengine.models import ModelDefinitionException -from cqlengine.tests.base import BaseCassEngTestCase, CASSANDRA_VERSION -from cqlengine.connection import get_cluster +from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.tests.base import CASSANDRA_VERSION, PROTOCOL_VERSION -cluster = get_cluster() class 
TestStaticModel(Model): __keyspace__ = 'test' @@ -31,7 +30,7 @@ def tearDownClass(cls): super(TestStaticColumn, cls).tearDownClass() drop_table(TestStaticModel) - @skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") + @skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_mixed_updates(self): """ Tests that updates on both static and non-static columns work as intended """ instance = TestStaticModel.create() @@ -47,7 +46,7 @@ def test_mixed_updates(self): assert actual.static == "it's still shared" - @skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") + @skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_static_only_updates(self): """ Tests that updates on static only column work as intended """ instance = TestStaticModel.create() diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 3bda88ed9d..55258e52b9 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -3,14 +3,14 @@ from cqlengine.connection import get_session from cqlengine.exceptions import CQLEngineException from cqlengine.management import get_fields, sync_table, drop_table -from cqlengine.tests.base import BaseCassEngTestCase, CASSANDRA_VERSION +from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.tests.base import CASSANDRA_VERSION, PROTOCOL_VERSION from cqlengine import management from cqlengine.tests.query.test_queryset import TestModel from cqlengine.models import Model from cqlengine import columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy from unittest import skipUnless -from cqlengine.connection import get_cluster -cluster = get_cluster() + class CreateKeyspaceTest(BaseCassEngTestCase): def test_create_succeeeds(self): @@ -256,7 +256,7 @@ def test_failure(self): sync_table(self.FakeModel) 
-@skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") +@skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_static_columns(): class StaticModel(Model): id = columns.Integer(primary_key=True) diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 00b1be49ab..8383ccdf91 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -20,10 +20,8 @@ from cqlengine import statements from cqlengine import operators - -from cqlengine.connection import get_cluster, get_session - -cluster = get_cluster() +from cqlengine.connection import get_session +from cqlengine.tests.base import PROTOCOL_VERSION class TzOffset(tzinfo): @@ -690,7 +688,7 @@ def test_objects_property_returns_fresh_queryset(self): assert TestModel.objects._result_cache is None -@skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") +@skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_paged_result_handling(): # addresses #225 class PagingTest(Model): diff --git a/cqlengine/tests/test_ifnotexists.py b/cqlengine/tests/test_ifnotexists.py index 202a1c03e9..6749ae8820 100644 --- a/cqlengine/tests/test_ifnotexists.py +++ b/cqlengine/tests/test_ifnotexists.py @@ -1,14 +1,12 @@ from unittest import skipUnless from cqlengine.management import sync_table, drop_table, create_keyspace, delete_keyspace from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.tests.base import PROTOCOL_VERSION from cqlengine.models import Model from cqlengine.exceptions import LWTException from cqlengine import columns, BatchQuery from uuid import uuid4 import mock -from cqlengine.connection import get_cluster - -cluster = get_cluster() class TestIfNotExistsModel(Model): @@ -43,7 +41,7 @@ def tearDownClass(cls): class IfNotExistsInsertTests(BaseIfNotExistsTest): - @skipUnless(cluster.protocol_version >= 2, "only 
runs against the cql3 protocol v2.0") + @skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_insert_if_not_exists_success(self): """ tests that insertion with if_not_exists work as expected """ @@ -77,7 +75,7 @@ def test_insert_if_not_exists_failure(self): self.assertEquals(tm.count, 9) self.assertEquals(tm.text, '111111111111') - @skipUnless(cluster.protocol_version >= 2, "only runs against the cql3 protocol v2.0") + @skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_batch_insert_if_not_exists_success(self): """ tests that batch insertion with if_not_exists work as expected """ From 038a11914ae972076e8e5e3ad5b3ceac17b6cd35 Mon Sep 17 00:00:00 2001 From: Amy Hanlon Date: Sun, 4 Jan 2015 17:59:19 -0500 Subject: [PATCH 1111/3726] remove connecting to cassandra in test script since nose handles it in setup_package --- bin/test.py | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/bin/test.py b/bin/test.py index 4ac302a29e..d8d5097beb 100755 --- a/bin/test.py +++ b/bin/test.py @@ -1,30 +1,6 @@ #!/usr/bin/env python -import os import nose -import sys -sys.path.append("") - -# setup cassandra -from cqlengine import connection - -try: - CASSANDRA_VERSION = int(os.environ["CASSANDRA_VERSION"]) -except: - print("CASSANDRA_VERSION must be set as an environment variable. 
One of (12, 20, 21)") - raise - -if os.environ.get('CASSANDRA_TEST_HOST'): - CASSANDRA_TEST_HOST = os.environ['CASSANDRA_TEST_HOST'] -else: - CASSANDRA_TEST_HOST = 'localhost' - -if CASSANDRA_VERSION < 20: - protocol_version = 1 -else: - protocol_version = 2 - -connection.setup([CASSANDRA_TEST_HOST], protocol_version=protocol_version, default_keyspace='cqlengine_test') nose.main() From f04284810e0ace64ae22d20d09f3e78d3d926893 Mon Sep 17 00:00:00 2001 From: Amy Hanlon Date: Sun, 4 Jan 2015 18:15:25 -0500 Subject: [PATCH 1112/3726] pip should install the dev requirements when provisioning --- manifests/default.pp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manifests/default.pp b/manifests/default.pp index af718e13e1..4b94fc8512 100644 --- a/manifests/default.pp +++ b/manifests/default.pp @@ -60,7 +60,7 @@ exec {"install-requirements": cwd => "/vagrant", - command => "pip install -r requirements.txt", + command => "pip install -r requirements-dev.txt", require => [Package["python-pip"], Package["python-dev"]] } } From 767bcf34d25f0088e2b3f34cd4079dc720679e4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Kandulski?= Date: Mon, 5 Jan 2015 13:05:09 +0100 Subject: [PATCH 1113/3726] Properly handle unhexlify argument type and return type in Python 3 When the schema contains user defined types, binascii.uhexlify gets called. In Python 3 unhexlify accepts "only bytestring or bytearray objects as input" and returns bytes too. This caused apply_parameters to crash. Now for Python 3 there'a a wrapper than accepts str or bytes and returns str. 
--- cassandra/cqltypes.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 100c2968c9..5db5f7132b 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -59,6 +59,18 @@ _number_types = frozenset((int, long, float)) +if six.PY3: + def _unhexlify(s): + if not isinstance(s, six.binary_type): + s = s.encode('ascii') + result = unhexlify(s) + if isinstance(result, six.binary_type): + result = result.decode('ascii') + return result +else: + _unhexlify = unhexlify + + def trim_if_startswith(s, prefix): if s.startswith(prefix): return s[len(prefix):] @@ -854,8 +866,8 @@ def make_udt_class(cls, keyspace, udt_name, names_and_types, mapped_class): @classmethod def apply_parameters(cls, subtypes, names): keyspace = subtypes[0] - udt_name = unhexlify(subtypes[1].cassname) - field_names = [unhexlify(encoded_name) for encoded_name in names[2:]] + udt_name = _unhexlify(subtypes[1].cassname) + field_names = [_unhexlify(encoded_name) for encoded_name in names[2:]] assert len(field_names) == len(subtypes[2:]) return type(udt_name, (cls,), {'subtypes': subtypes[2:], 'cassname': cls.cassname, From 907a6c8a3c787d3d9c3eae27d208498ef37f3499 Mon Sep 17 00:00:00 2001 From: Amy Hanlon Date: Mon, 5 Jan 2015 22:22:47 -0500 Subject: [PATCH 1114/3726] use nosetests command to run tests in travis --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index dd4379e258..331e0b59ba 100644 --- a/.travis.yml +++ b/.travis.yml @@ -29,4 +29,4 @@ install: - "pip install -r requirements.txt --use-mirrors" script: - - "bin/test.py --no-skip" + - "nosetests cqlengine/tests --no-skip" From b4fb4468660105be76e0ab3347f971a2c8580a8a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 6 Jan 2015 11:01:46 -0600 Subject: [PATCH 1115/3726] Decode column names from composite types (x=>) PYTHON-192 follow-on --- cassandra/cqltypes.py | 10 +++++----- 1 file changed, 
5 insertions(+), 5 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 100c2968c9..15169621cc 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -129,13 +129,13 @@ def parse_casstype_args(typestring): elif tok == ')': types, names = args.pop() prev_types, prev_names = args[-1] - prev_types[-1] = prev_types[-1].apply_parameters(types, names) else: types, names = args[-1] - if ':' in tok: - name, tok = tok.rsplit(':', 1) - names.append(name) + parts = re.split(':|=>', tok) + tok = parts.pop() + if parts: + names.append(parts[0]) else: names.append(None) @@ -294,7 +294,7 @@ def apply_parameters(cls, subtypes, names=None): newname = cls.cass_parameterized_type_with(subtypes) if six.PY2 and isinstance(newname, unicode): newname = newname.encode('utf-8') - return type(newname, (cls,), {'subtypes': subtypes, 'cassname': cls.cassname}) + return type(newname, (cls,), {'subtypes': subtypes, 'cassname': cls.cassname, 'fieldnames': names}) @classmethod def cql_parameterized_type(cls): From 1a54d402c3c799a96cea25836d455073bac15508 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 6 Jan 2015 11:02:32 -0600 Subject: [PATCH 1116/3726] Use composite type col names, don't display incompatible tables. 
PYTHON-192 follow-on --- cassandra/metadata.py | 30 ++++++++++++++++++++- tests/integration/standard/test_metadata.py | 30 +++++++++++++++------ 2 files changed, 51 insertions(+), 9 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index c23f194271..4e8d0fe5d7 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -264,6 +264,10 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): is_compact = True has_value = column_aliases or not cf_col_rows clustering_size = num_column_name_components + + # Some thrift tables define names in composite types (see PYTHON-192) + if not column_aliases and hasattr(comparator, 'fieldnames'): + column_aliases = comparator.fieldnames else: is_compact = True if column_aliases or not cf_col_rows: @@ -356,6 +360,7 @@ def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): table_meta.options = self._build_table_options(row) table_meta.is_compact_storage = is_compact + return table_meta def _build_table_options(self, row): @@ -922,6 +927,18 @@ def primary_key(self): A dict mapping trigger names to :class:`.TriggerMetadata` instances. """ + @property + def is_cql_compatible(self): + """ + A boolean indicating if this table can be represented as CQL in export + """ + # no such thing as DCT in CQL + incompatible = issubclass(self.comparator, types.DynamicCompositeType) + # no compact storage with more than one column beyond PK + incompatible |= self.is_compact_storage and len(self.columns) > len(self.primary_key) + 1 + + return not incompatible + def __init__(self, keyspace_metadata, name, partition_key=None, clustering_key=None, columns=None, triggers=None, options=None): self.keyspace = keyspace_metadata self.name = name @@ -938,6 +955,18 @@ def export_as_string(self): along with all indexes on it. The returned string is formatted to be human readable. 
""" + if self.is_cql_compatible: + ret = self.all_as_cql() + else: + # If we can't produce this table with CQL, comment inline + ret = "/*\nWarning: Table %s.%s omitted because it has constructs not compatible with CQL (was created via legacy API).\n" % \ + (self.keyspace.name, self.name) + ret += "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s" % self.all_as_cql() + ret += "\n*/" + + return ret + + def all_as_cql(self): ret = self.as_cql_query(formatted=True) ret += ";" @@ -947,7 +976,6 @@ def export_as_string(self): for trigger_meta in self.triggers.values(): ret += "\n%s;" % (trigger_meta.as_cql_query(),) - return ret def as_cql_query(self, formatted=False): diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index f8988fecab..7f0485e93e 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -626,16 +626,22 @@ def test_legacy_tables(self): # is a bit strange, but it replays in CQL with desired results expected_string = """CREATE KEYSPACE legacy WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true; +/* +Warning: Table legacy.composite_comp_with_col omitted because it has constructs not compatible with CQL (was created via legacy API). 
+ +Approximate structure, for reference: +(this should not be used to reproduce this schema) + CREATE TABLE legacy.composite_comp_with_col ( key blob, - column0 't=>org.apache.cassandra.db.marshal.TimeUUIDType', - column1 'b=>org.apache.cassandra.db.marshal.BytesType', - column2 's=>org.apache.cassandra.db.marshal.UTF8Type', + t timeuuid, + b blob, + s text, "b@6869746d65776974686d75736963" blob, "b@6d616d6d616a616d6d61" blob, - PRIMARY KEY (key, column0, column1, column2) + PRIMARY KEY (key, t, b, s) ) WITH COMPACT STORAGE - AND CLUSTERING ORDER BY (column0 ASC, column1 ASC, column2 ASC) + AND CLUSTERING ORDER BY (t ASC, b ASC, s ASC) AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = 'Stores file meta data' AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} @@ -650,6 +656,7 @@ def test_legacy_tables(self): AND speculative_retry = 'NONE'; CREATE INDEX idx_two ON legacy.composite_comp_with_col ("b@6869746d65776974686d75736963"); CREATE INDEX idx_one ON legacy.composite_comp_with_col ("b@6d616d6d616a616d6d61"); +*/ CREATE TABLE legacy.nested_composite_key ( key 'org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType, org.apache.cassandra.db.marshal.UTF8Type)', @@ -747,10 +754,16 @@ def test_legacy_tables(self): AND read_repair_chance = 0.0 AND speculative_retry = 'NONE'; +/* +Warning: Table legacy.composite_comp_no_col omitted because it has constructs not compatible with CQL (was created via legacy API). 
+ +Approximate structure, for reference: +(this should not be used to reproduce this schema) + CREATE TABLE legacy.composite_comp_no_col ( key blob, - column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(t=>org.apache.cassandra.db.marshal.TimeUUIDType, b=>org.apache.cassandra.db.marshal.BytesType, s=>org.apache.cassandra.db.marshal.UTF8Type)', - column2 's=>org.apache.cassandra.db.marshal.UTF8Type', + column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(org.apache.cassandra.db.marshal.TimeUUIDType, org.apache.cassandra.db.marshal.BytesType, org.apache.cassandra.db.marshal.UTF8Type)', + column2 text, value blob, PRIMARY KEY (key, column1, column1, column2) ) WITH COMPACT STORAGE @@ -766,7 +779,8 @@ def test_legacy_tables(self): AND memtable_flush_period_in_ms = 0 AND min_index_interval = 128 AND read_repair_chance = 0.0 - AND speculative_retry = 'NONE';""" + AND speculative_retry = 'NONE'; +*/""" ccm = get_cluster() ccm.run_cli(cli_script) From bdc4259c493f62143e0de7e7f8276050bfbea58f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 6 Jan 2015 11:26:57 -0600 Subject: [PATCH 1117/3726] Remove implicit timestamp scaling from DateType PYTHON-204 --- cassandra/cqltypes.py | 25 +++++-------------------- tests/unit/test_types.py | 5 ++--- 2 files changed, 7 insertions(+), 23 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 100c2968c9..66c3df6603 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -576,31 +576,16 @@ def deserialize(byts, protocol_version): @staticmethod def serialize(v, protocol_version): try: - converted = calendar.timegm(v.utctimetuple()) - converted = converted * 1e3 + getattr(v, 'microsecond', 0) / 1e3 + # v is datetime + timestamp_seconds = calendar.timegm(v.utctimetuple()) + timestamp = timestamp_seconds * 1e3 + getattr(v, 'microsecond', 0) / 1e3 except AttributeError: # Ints and floats are valid timestamps too if type(v) not in _number_types: raise TypeError('DateType arguments must 
be a datetime or timestamp') + timestamp = v - global _have_warned_about_timestamps - if not _have_warned_about_timestamps: - _have_warned_about_timestamps = True - warnings.warn( - "timestamp columns in Cassandra hold a number of " - "milliseconds since the unix epoch. Currently, when executing " - "prepared statements, this driver multiplies timestamp " - "values by 1000 so that the result of time.time() " - "can be used directly. However, the driver cannot " - "match this behavior for non-prepared statements, " - "so the 2.0 version of the driver will no longer multiply " - "timestamps by 1000. It is suggested that you simply use " - "datetime.datetime objects for 'timestamp' values to avoid " - "any ambiguity and to guarantee a smooth upgrade of the " - "driver.") - converted = v * 1e3 - - return int64_pack(long(converted)) + return int64_pack(long(timestamp)) class TimestampType(DateType): diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 5758c508aa..24b1a66724 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -183,9 +183,8 @@ def test_datetype(self): now_timestamp = time.time() now_datetime = datetime.datetime.utcfromtimestamp(now_timestamp) - # same results serialized - # (this could change if we follow up on the timestamp multiplication warning in DateType.serialize) - self.assertEqual(DateType.serialize(now_datetime, 0), DateType.serialize(now_timestamp, 0)) + # same results serialized (must scale the timestamp to milliseconds) + self.assertEqual(DateType.serialize(now_datetime, 0), DateType.serialize(now_timestamp * 1e3, 0)) # from timestamp date_type = DateType(now_timestamp) From a68b1de613c41266d5547c6ce8a2f2e118b173ad Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 6 Jan 2015 14:11:34 -0600 Subject: [PATCH 1118/3726] Better diff error messages for test_metadata strings --- tests/integration/standard/test_metadata.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git 
a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index f8988fecab..dec42ac80e 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import six +import difflib try: import unittest2 as unittest @@ -339,6 +340,14 @@ def test_export_keyspace_schema(self): self.assertIsInstance(keyspace_metadata.export_as_string(), six.string_types) self.assertIsInstance(keyspace_metadata.as_cql_query(), six.string_types) + def assert_equal_diff(self, received, expected): + if received != expected: + diff_string = '\n'.join(difflib.unified_diff(expected.split('\n'), + received.split('\n'), + 'EXPECTED', 'RECEIVED', + lineterm='')) + self.fail(diff_string) + def test_export_keyspace_schema_udts(self): """ Test udt exports @@ -415,7 +424,7 @@ def test_export_keyspace_schema_udts(self): AND read_repair_chance = 0.0 AND speculative_retry = '99.0PERCENTILE';""" - self.assertEqual(cluster.metadata.keyspaces['export_udts'].export_as_string(), expected_string) + self.assert_equal_diff(cluster.metadata.keyspaces['export_udts'].export_as_string(), expected_string) table_meta = cluster.metadata.keyspaces['export_udts'].tables['users'] @@ -436,7 +445,7 @@ def test_export_keyspace_schema_udts(self): AND read_repair_chance = 0.0 AND speculative_retry = '99.0PERCENTILE';""" - self.assertEqual(table_meta.export_as_string(), expected_string) + self.assert_equal_diff(table_meta.export_as_string(), expected_string) def test_case_sensitivity(self): """ @@ -775,8 +784,7 @@ def test_legacy_tables(self): session = cluster.connect() legacy_meta = cluster.metadata.keyspaces['legacy'] - - self.assertEqual(legacy_meta.export_as_string(), expected_string) + self.assert_equal_diff(legacy_meta.export_as_string(), expected_string) session.execute('DROP KEYSPACE legacy') From 
1545984aa6a7949c91ee31060b57c371c647201b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 6 Jan 2015 16:48:01 -0600 Subject: [PATCH 1119/3726] Build schema meta in face of schema disagreements at startup. Also, setting max_schema_agreement_wait <= 0 bypasses wait in all cases. PYTHON-205 --- cassandra/cluster.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d826b9afd8..83a58122df 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -336,6 +336,7 @@ def auth_provider(self, value): """ The maximum duration (in seconds) that the driver will wait for schema agreement across the cluster. Defaults to ten seconds. + If set <= 0, the driver will bypass schema agreement waits altogether. """ metadata = None @@ -1810,6 +1811,9 @@ def _try_connect(self, host): self._refresh_node_list_and_token_map(connection, preloaded_results=shared_results) self._refresh_schema(connection, preloaded_results=shared_results) + if not self._cluster.metadata.keyspaces: + log.warning("[control connection] No schema built on connect; retrying without wait for schema agreement") + self._refresh_schema(connection, preloaded_results=shared_results, schema_agreement_wait=0) except Exception: connection.close() raise @@ -1893,13 +1897,15 @@ def refresh_schema(self, keyspace=None, table=None, usertype=None): log.debug("[control connection] Error refreshing schema", exc_info=True) self._signal_error() - def _refresh_schema(self, connection, keyspace=None, table=None, usertype=None, preloaded_results=None): + def _refresh_schema(self, connection, keyspace=None, table=None, usertype=None, preloaded_results=None, schema_agreement_wait=None): if self._cluster.is_shutdown: return assert table is None or usertype is None - agreed = self.wait_for_schema_agreement(connection, preloaded_results=preloaded_results) + agreed = self.wait_for_schema_agreement(connection, + preloaded_results=preloaded_results, + 
wait_time=schema_agreement_wait) if not agreed: log.debug("Skipping schema refresh due to lack of schema agreement") return @@ -2142,7 +2148,12 @@ def _handle_schema_change(self, event): usertype = event.get('type') self._submit(self.refresh_schema, keyspace, table, usertype) - def wait_for_schema_agreement(self, connection=None, preloaded_results=None): + def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wait_time=None): + + total_timeout = wait_time if wait_time is not None else self._cluster.max_schema_agreement_wait + if total_timeout <= 0: + return True + # Each schema change typically generates two schema refreshes, one # from the response type and one from the pushed notification. Holding # a lock is just a simple way to cut down on the number of schema queries @@ -2167,7 +2178,6 @@ def wait_for_schema_agreement(self, connection=None, preloaded_results=None): start = self._time.time() elapsed = 0 cl = ConsistencyLevel.ONE - total_timeout = self._cluster.max_schema_agreement_wait schema_mismatches = None while elapsed < total_timeout: peers_query = QueryMessage(query=self._SELECT_SCHEMA_PEERS, consistency_level=cl) From 67a8b8edaeb434e2d1e2e7075cd21415992f1b34 Mon Sep 17 00:00:00 2001 From: Christopher Bradford Date: Wed, 7 Jan 2015 09:12:54 -0500 Subject: [PATCH 1120/3726] Added column name to ValidateError messages --- cqlengine/columns.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index 786dd1ee4f..e5d63baef0 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -252,7 +252,7 @@ def validate(self, value): value = super(Text, self).validate(value) if value is None: return if not isinstance(value, (six.string_types, bytearray)) and value is not None: - raise ValidationError('{} is not a string'.format(type(value))) + raise ValidationError('{} {} is not a string'.format(self.column_name, type(value))) if self.max_length: if 
len(value) > self.max_length: raise ValidationError('{} is longer than {} characters'.format(self.column_name, self.max_length)) @@ -271,7 +271,7 @@ def validate(self, value): try: return int(val) except (TypeError, ValueError): - raise ValidationError("{} can't be converted to integral value".format(value)) + raise ValidationError("{} {} can't be converted to integral value".format(self.column_name, value)) def to_python(self, value): return self.validate(value) @@ -295,7 +295,7 @@ def validate(self, value): return int(val) except (TypeError, ValueError): raise ValidationError( - "{} can't be converted to integral value".format(value)) + "{} {} can't be converted to integral value".format(self.column_name, value)) def to_python(self, value): return self.validate(value) @@ -351,7 +351,7 @@ def to_database(self, value): if isinstance(value, date): value = datetime(value.year, value.month, value.day) else: - raise ValidationError("'{}' is not a datetime object".format(value)) + raise ValidationError("{} '{}' is not a datetime object".format(self.column_name, value)) epoch = datetime(1970, 1, 1, tzinfo=value.tzinfo) offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 @@ -378,7 +378,7 @@ def to_database(self, value): if isinstance(value, datetime): value = value.date() if not isinstance(value, date): - raise ValidationError("'{}' is not a date object".format(repr(value))) + raise ValidationError("{} '{}' is not a date object".format(self.column_name, repr(value))) return int((value - date(1970, 1, 1)).total_seconds() * 1000) @@ -398,7 +398,7 @@ def validate(self, value): if isinstance(val, _UUID): return val if isinstance(val, six.string_types) and self.re_uuid.match(val): return _UUID(val) - raise ValidationError("{} is not a valid uuid".format(value)) + raise ValidationError("{} {} is not a valid uuid".format(self.column_name, value)) def to_python(self, value): return self.validate(value) @@ -480,7 +480,7 @@ def validate(self, value): try: 
return float(value) except (TypeError, ValueError): - raise ValidationError("{} is not a valid float".format(value)) + raise ValidationError("{} {} is not a valid float".format(self.column_name, value)) def to_python(self, value): return self.validate(value) @@ -500,7 +500,7 @@ def validate(self, value): try: return _Decimal(val) except InvalidOperation: - raise ValidationError("'{}' can't be coerced to decimal".format(val)) + raise ValidationError("{} '{}' can't be coerced to decimal".format(self.column_name, val)) def to_python(self, value): return self.validate(value) @@ -542,7 +542,7 @@ def validate(self, value): # It is dangerous to let collections have more than 65535. # See: https://issues.apache.org/jira/browse/CASSANDRA-5428 if value is not None and len(value) > 65535: - raise ValidationError("Collection can't have more than 65535 elements.") + raise ValidationError("{} Collection can't have more than 65535 elements.".format(self.column_name)) return value def get_column_def(self): @@ -593,12 +593,12 @@ def validate(self, value): types = (set,) if self.strict else (set, list, tuple) if not isinstance(val, types): if self.strict: - raise ValidationError('{} is not a set object'.format(val)) + raise ValidationError('{} {} is not a set object'.format(self.column_name, val)) else: - raise ValidationError('{} cannot be coerced to a set object'.format(val)) + raise ValidationError('{} {} cannot be coerced to a set object'.format(self.column_name, val)) if None in val: - raise ValidationError("None not allowed in a set") + raise ValidationError("{} None not allowed in a set".format(self.column_name)) return {self.value_col.validate(v) for v in val} @@ -639,9 +639,9 @@ def validate(self, value): val = super(List, self).validate(value) if val is None: return if not isinstance(val, (set, list, tuple)): - raise ValidationError('{} is not a list object'.format(val)) + raise ValidationError('{} {} is not a list object'.format(self.column_name, val)) if None in val: - 
raise ValidationError("None is not allowed in a list") + raise ValidationError("{} None is not allowed in a list".format(self.column_name)) return [self.value_col.validate(v) for v in val] def to_python(self, value): @@ -714,7 +714,7 @@ def validate(self, value): val = super(Map, self).validate(value) if val is None: return if not isinstance(val, dict): - raise ValidationError('{} is not a dict object'.format(val)) + raise ValidationError('{} {} is not a dict object'.format(self.column_name, val)) return {self.key_col.validate(k):self.value_col.validate(v) for k,v in val.items()} def to_python(self, value): From 9364fa825b2ba69a6e6453ee6e95a8c843df6fb6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 7 Jan 2015 11:28:58 -0600 Subject: [PATCH 1121/3726] Add Cluster.refresh_schema for explicit, synchronous refresh. PYTHON-205 --- cassandra/cluster.py | 35 ++++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 83a58122df..0a3481767e 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1045,14 +1045,26 @@ def _ensure_core_connections(self): for pool in session._pools.values(): pool.ensure_core_connections() - def submit_schema_refresh(self, keyspace=None, table=None): + def refresh_schema(self, keyspace=None, table=None, usertype=None, schema_agreement_wait=None): + """ + Synchronously refresh the schema metadata. + By default timeout for this operation is governed by :attr:`~.Cluster.max_schema_agreement_wait` + and :attr:`~.Cluster.control_connection_timeout`. + Passing schema_agreement_wait here overrides :attr:`~.Cluster.max_schema_agreement_wait`. Setting + schema_agreement_wait <= 0 will bypass schema agreement and refresh schema immediately. + RuntimeWarning is raised if schema refresh fails for any reason. 
+ """ + if not self.control_connection.refresh_schema(keyspace, table, usertype, schema_agreement_wait): + raise RuntimeWarning("Schema was not refreshed. See log for details.") + + def submit_schema_refresh(self, keyspace=None, table=None, usertype=None): """ Schedule a refresh of the internal representation of the current schema for this cluster. If `keyspace` is specified, only that keyspace will be refreshed, and likewise for `table`. """ return self.executor.submit( - self.control_connection.refresh_schema, keyspace, table) + self.control_connection.refresh_schema, keyspace, table, usertype) def _prepare_all_queries(self, host): if not self._prepared_statements: @@ -1887,19 +1899,23 @@ def shutdown(self): self._connection.close() del self._connection - def refresh_schema(self, keyspace=None, table=None, usertype=None): + def refresh_schema(self, keyspace=None, table=None, usertype=None, + schema_agreement_wait=None): try: if self._connection: - self._refresh_schema(self._connection, keyspace, table, usertype) + return self._refresh_schema(self._connection, keyspace, table, usertype, + schema_agreement_wait=schema_agreement_wait) except ReferenceError: pass # our weak reference to the Cluster is no good except Exception: log.debug("[control connection] Error refreshing schema", exc_info=True) self._signal_error() + return False - def _refresh_schema(self, connection, keyspace=None, table=None, usertype=None, preloaded_results=None, schema_agreement_wait=None): + def _refresh_schema(self, connection, keyspace=None, table=None, usertype=None, + preloaded_results=None, schema_agreement_wait=None): if self._cluster.is_shutdown: - return + return False assert table is None or usertype is None @@ -1908,7 +1924,7 @@ def _refresh_schema(self, connection, keyspace=None, table=None, usertype=None, wait_time=schema_agreement_wait) if not agreed: log.debug("Skipping schema refresh due to lack of schema agreement") - return + return False cl = ConsistencyLevel.ONE if table: 
@@ -1924,7 +1940,7 @@ def _handle_results(success, result): col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl) triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl) (cf_success, cf_result), (col_success, col_result), (triggers_success, triggers_result) \ - = connection.wait_for_responses(cf_query, col_query, triggers_query, fail_on_error=False) + = connection.wait_for_responses(cf_query, col_query, triggers_query, timeout=self._timeout, fail_on_error=False) log.debug("[control connection] Fetched table info for %s.%s, rebuilding metadata", keyspace, table) cf_result = _handle_results(cf_success, cf_result) @@ -1963,7 +1979,7 @@ def _handle_results(success, result): QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl) ] - responses = connection.wait_for_responses(*queries, fail_on_error=False) + responses = connection.wait_for_responses(*queries, timeout=self._timeout, fail_on_error=False) (ks_success, ks_result), (cf_success, cf_result), \ (col_success, col_result), (types_success, types_result), \ (trigger_success, triggers_result) = responses @@ -2009,6 +2025,7 @@ def _handle_results(success, result): log.debug("[control connection] Fetched schema, rebuilding metadata") self._cluster.metadata.rebuild_schema(ks_result, types_result, cf_result, col_result, triggers_result) + return True def refresh_node_list_and_token_map(self, force_token_rebuild=False): try: From 07051753cdeb0b4e2826f7dbffd3a9634bd0cb1d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 7 Jan 2015 11:39:06 -0600 Subject: [PATCH 1122/3726] simplify ControlConnection._handle_schema_change --- cassandra/cluster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 0a3481767e..0d89eab724 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2160,8 +2160,8 @@ def _handle_status_change(self, event): self._cluster.on_down(host, 
is_host_addition=False) def _handle_schema_change(self, event): - keyspace = event['keyspace'] or None - table = event.get('table') or None + keyspace = event.get('keyspace') + table = event.get('table') usertype = event.get('type') self._submit(self.refresh_schema, keyspace, table, usertype) From e4707b0ada557c3c5d8131fee3788253d2a01cf7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 7 Jan 2015 11:46:24 -0600 Subject: [PATCH 1123/3726] log.warn() --> log.warning() --- cassandra/cluster.py | 14 +++++++------- cassandra/metadata.py | 2 +- cassandra/pool.py | 2 +- cassandra/query.py | 15 ++++++++------- 4 files changed, 17 insertions(+), 16 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 0d89eab724..f024b07cc7 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1050,8 +1050,8 @@ def refresh_schema(self, keyspace=None, table=None, usertype=None, schema_agreem Synchronously refresh the schema metadata. By default timeout for this operation is governed by :attr:`~.Cluster.max_schema_agreement_wait` and :attr:`~.Cluster.control_connection_timeout`. - Passing schema_agreement_wait here overrides :attr:`~.Cluster.max_schema_agreement_wait`. Setting - schema_agreement_wait <= 0 will bypass schema agreement and refresh schema immediately. + Passing schema_agreement_wait here overrides :attr:`~.Cluster.max_schema_agreement_wait`. + Setting schema_agreement_wait <= 0 will bypass schema agreement and refresh schema immediately. RuntimeWarning is raised if schema refresh fails for any reason. 
""" if not self.control_connection.refresh_schema(keyspace, table, usertype, schema_agreement_wait): @@ -2007,8 +2007,8 @@ def _handle_results(success, result): log.debug("[control connection] triggers table not found") triggers_result = {} elif isinstance(triggers_result, Unauthorized): - log.warn("[control connection] this version of Cassandra does not allow access to schema_triggers metadata with authorization enabled (CASSANDRA-7967); " - "The driver will operate normally, but will not reflect triggers in the local metadata model, or schema strings.") + log.warning("[control connection] this version of Cassandra does not allow access to schema_triggers metadata with authorization enabled (CASSANDRA-7967); " + "The driver will operate normally, but will not reflect triggers in the local metadata model, or schema strings.") triggers_result = {} else: raise triggers_result @@ -2086,7 +2086,7 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, tokens = row.get("tokens") if not tokens: - log.warn("Excluding host (%s) with no tokens in system.peers table of %s." % (addr, connection.host)) + log.warning("Excluding host (%s) with no tokens in system.peers table of %s." 
% (addr, connection.host)) continue found_hosts.add(addr) @@ -2223,8 +2223,8 @@ def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wai self._time.sleep(0.2) elapsed = self._time.time() - start - log.warn("Node %s is reporting a schema disagreement: %s", - connection.host, schema_mismatches) + log.warning("Node %s is reporting a schema disagreement: %s", + connection.host, schema_mismatches) return False def _get_schema_mismatches(self, peers_result, local_result, local_address): diff --git a/cassandra/metadata.py b/cassandra/metadata.py index c23f194271..3fdce6965a 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -513,7 +513,7 @@ def create(cls, strategy_class, options_map): try: rs_instance = rs_class(options_map) except Exception as exc: - log.warn("Failed creating %s with options %s: %s", strategy_name, options_map, exc) + log.warning("Failed creating %s with options %s: %s", strategy_name, options_map, exc) return None return rs_instance diff --git a/cassandra/pool.py b/cassandra/pool.py index 462f597bbd..587fa27753 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -183,7 +183,7 @@ def run(self): # call on_exception for logging purposes even if next_delay is None if self.on_exception(exc, next_delay): if next_delay is None: - log.warn( + log.warning( "Will not continue to retry reconnection attempts " "due to an exhausted retry schedule") else: diff --git a/cassandra/query.py b/cassandra/query.py index f9e0c08696..1b0a56d4a8 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -103,11 +103,12 @@ def named_tuple_factory(colnames, rows): try: Row = namedtuple('Row', clean_column_names) except Exception: - log.warn("Failed creating named tuple for results with column names %s (cleaned: %s) (see Python 'namedtuple' documentation for details on name rules). " - "Results will be returned with positional names. 
" - "Avoid this by choosing different names, using SELECT \"\" AS aliases, " - "or specifying a different row_factory on your Session" % - (colnames, clean_column_names)) + log.warning("Failed creating named tuple for results with column names %s (cleaned: %s) " + "(see Python 'namedtuple' documentation for details on name rules). " + "Results will be returned with positional names. " + "Avoid this by choosing different names, using SELECT \"\" AS aliases, " + "or specifying a different row_factory on your Session" % + (colnames, clean_column_names)) Row = namedtuple('Row', clean_column_names, rename=True) return [Row(*row) for row in rows] @@ -190,7 +191,7 @@ class Statement(object): :class:`~.TokenAwarePolicy` is configured for :attr:`.Cluster.load_balancing_policy` - It is set implicitly on :class:`.BoundStatement`, and :class:`.BatchStatement`, + It is set implicitly on :class:`.BoundStatement`, and :class:`.BatchStatement`, but must be set explicitly on :class:`.SimpleStatement`. .. 
versionadded:: 2.1.3 @@ -326,7 +327,7 @@ class PreparedStatement(object): column_metadata = None query_id = None query_string = None - keyspace = None # change to prepared_keyspace in major release + keyspace = None # change to prepared_keyspace in major release routing_key_indexes = None From ee8453a552077b5be0f894e2298ac9e4b4447490 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 7 Jan 2015 12:07:42 -0600 Subject: [PATCH 1124/3726] Only run test_metadata/test_legacy_tables for 2.1.0+ --- tests/integration/standard/test_metadata.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index dec42ac80e..d527e22be8 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -546,6 +546,9 @@ def test_token_map(self): def test_legacy_tables(self): + if get_server_versions()[0] < (2, 1, 0): + raise unittest.SkipTest('Test schema output assumes 2.1.0+ options') + cli_script = """CREATE KEYSPACE legacy WITH placement_strategy = 'SimpleStrategy' AND strategy_options = {replication_factor:1}; From 4a911ef9ac6231d36ff37b3e5fd0cea4a4ee6786 Mon Sep 17 00:00:00 2001 From: Brian Cantoni Date: Wed, 7 Jan 2015 18:32:29 -0800 Subject: [PATCH 1125/3726] Doc improvements: - Explain prerequisites needed for benchmark examples - Minor spelling and other fixes --- docs/getting_started.rst | 4 ++-- docs/index.rst | 2 +- docs/installation.rst | 4 ++-- docs/performance.rst | 19 +++++++++++++++---- docs/upgrading.rst | 6 +++--- docs/user_defined_types.rst | 4 ++-- 6 files changed, 25 insertions(+), 14 deletions(-) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 1d7ce2acc6..2d458b3ea8 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -68,7 +68,7 @@ which sets the default keyspace for all queries made through that :class:`~.Sess session = cluster.connect('mykeyspace') -You can always change a Sesssion's 
keyspace using :meth:`~.Session.set_keyspace` or +You can always change a Session's keyspace using :meth:`~.Session.set_keyspace` or by executing a ``USE `` query: .. code-block:: python @@ -385,7 +385,7 @@ prepared statement: user2 = session.execute(user_lookup_stmt, [user_id2])[0] The second option is to create a :class:`~.BoundStatement` from the -:class:`~.PreparedStatement` and binding paramaters and set a consistency +:class:`~.PreparedStatement` and binding parameters and set a consistency level on that: .. code-block:: python diff --git a/docs/index.rst b/docs/index.rst index 8763baba2d..9df17964bc 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -8,7 +8,7 @@ The driver supports Python 2.6, 2.7, 3.3, and 3.4. This driver is open source under the `Apache v2 License `_. -The source code for this driver can be found on `Github `_. +The source code for this driver can be found on `GitHub `_. Contents -------- diff --git a/docs/installation.rst b/docs/installation.rst index 14fb9b17ba..01b943761c 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -12,7 +12,7 @@ Linux, OSX, and Windows are supported. Installation through pip ------------------------ `pip `_ is the suggested tool for installing -packages. It will handle installing all python dependencies for the driver at +packages. It will handle installing all Python dependencies for the driver at the same time as the driver itself. To install the driver*:: pip install cassandra-driver @@ -102,7 +102,7 @@ When installing manually through setup.py, you can disable both with the ``--no-extensions`` option, or selectively disable one or the other with ``--no-murmur3`` and ``--no-libev``. -To compile the extenions, ensure that GCC and the Python headers are available. +To compile the extensions, ensure that GCC and the Python headers are available. 
On Ubuntu and Debian, this can be accomplished by running:: diff --git a/docs/performance.rst b/docs/performance.rst index cb378f75a5..c7a6b3b290 100644 --- a/docs/performance.rst +++ b/docs/performance.rst @@ -1,6 +1,6 @@ Performance Notes ================= -The python driver for Cassandra offers several methods for executing queries. +The Python driver for Cassandra offers several methods for executing queries. You can synchronously block for queries to complete using :meth:`.Session.execute()`, you can use a future-like interface through :meth:`.Session.execute_async()`, or you can attach a callback to the future @@ -13,7 +13,7 @@ Benchmark Notes All benchmarks were executed using the `benchmark scripts `_ in the driver repository. They were executed on a laptop with 16 GiB of RAM, an SSD, -and a 2 GHz, four core CPU with hyperthreading. The Cassandra cluster was a three +and a 2 GHz, four core CPU with hyper-threading. The Cassandra cluster was a three node `ccm `_ cluster running on the same laptop with version 1.2.13 of Cassandra. I suggest testing these benchmarks against your own cluster when tuning the driver for optimal throughput or latency. @@ -26,6 +26,17 @@ by using the ``--asyncore-only`` command line option. Each benchmark completes 100,000 small inserts. The replication factor for the keyspace was three, so all nodes were replicas for the inserted rows. +The benchmarks require the Python driver C extensions as well as a few additional +Python packages. Follow these steps to install the prerequisites: + +1. Install packages to support Python driver C extensions: + + * Debian/Ubuntu: ``sudo apt-get install gcc python-dev libev4 libev-dev`` + * RHEL/CentOS/Fedora: ``sudo yum install gcc python-dev libev4 libev-dev`` + +2. Install Python packages: ``pip install scales twisted blist`` +3. 
Re-install the Cassandra driver: ``pip install --upgrade cassandra-driver`` + Synchronous Execution (`sync.py `_) ------------------------------------------------------------------------------------------------------------- Although this is the simplest way to make queries, it has low throughput @@ -173,7 +184,7 @@ Callback Chaining (`callback_full_pipeline.py Date: Thu, 8 Jan 2015 12:29:27 -0600 Subject: [PATCH 1126/3726] generate unique request_id when sending AuthResponse --- cassandra/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index cf093eb721..5a58793cb7 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -558,7 +558,7 @@ def _handle_startup_response(self, startup_response, did_authenticate=False): log.debug("Sending SASL-based auth response on %s", self) initial_response = self.authenticator.initial_response() initial_response = "" if initial_response is None else initial_response - self.send_msg(AuthResponseMessage(initial_response), 0, self._handle_auth_response) + self.send_msg(AuthResponseMessage(initial_response), self.get_request_id(), self._handle_auth_response) elif isinstance(startup_response, ErrorMessage): log.debug("Received ErrorMessage on new connection (%s) from %s: %s", id(self), self.host, startup_response.summary_msg()) From 2d3eab2c2c58c830c7b63000a0b23065c6f7cadb Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 8 Jan 2015 14:02:58 -0600 Subject: [PATCH 1127/3726] Raise Exception instead of Warning in Cluster.refresh_schema --- cassandra/cluster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index f024b07cc7..0822e2a285 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1052,10 +1052,10 @@ def refresh_schema(self, keyspace=None, table=None, usertype=None, schema_agreem and :attr:`~.Cluster.control_connection_timeout`. 
Passing schema_agreement_wait here overrides :attr:`~.Cluster.max_schema_agreement_wait`. Setting schema_agreement_wait <= 0 will bypass schema agreement and refresh schema immediately. - RuntimeWarning is raised if schema refresh fails for any reason. + An Exception is raised if schema refresh fails for any reason. """ if not self.control_connection.refresh_schema(keyspace, table, usertype, schema_agreement_wait): - raise RuntimeWarning("Schema was not refreshed. See log for details.") + raise Exception("Schema was not refreshed. See log for details.") def submit_schema_refresh(self, keyspace=None, table=None, usertype=None): """ From 21ac88035b30b508512092933dfafed185a65365 Mon Sep 17 00:00:00 2001 From: Tyler Hobbs Date: Fri, 9 Jan 2015 14:47:25 -0600 Subject: [PATCH 1128/3726] Minor fix to example script --- example.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/example.py b/example.py index e068db98ed..30050f32cb 100644 --- a/example.py +++ b/example.py @@ -74,7 +74,8 @@ def main(): try: rows = future.result() except Exception: - log.exeception() + log.exception("Error reading rows:") + return for row in rows: log.info('\t'.join(row)) From 4f3e51672d9691b7a7919ed074b75907750f606c Mon Sep 17 00:00:00 2001 From: Amy Hanlon Date: Sun, 11 Jan 2015 15:28:32 -0500 Subject: [PATCH 1129/3726] skip transaction tests when running cassandra 1.2 --- cqlengine/tests/test_transaction.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cqlengine/tests/test_transaction.py b/cqlengine/tests/test_transaction.py index 43eb488f37..6080a65d3a 100644 --- a/cqlengine/tests/test_transaction.py +++ b/cqlengine/tests/test_transaction.py @@ -1,6 +1,9 @@ __author__ = 'Tim Martin' +from unittest import skipUnless + from cqlengine.management import sync_table, drop_table from cqlengine.tests.base import BaseCassEngTestCase +from cqlengine.tests.base import CASSANDRA_VERSION from cqlengine.models import Model from cqlengine.exceptions import 
LWTException from uuid import uuid4 @@ -18,6 +21,7 @@ class TestTransactionModel(Model): text = columns.Text(required=False) +@skipUnless(CASSANDRA_VERSION >= 20, "transactions only supported on cassandra 2.0 or higher") class TestTransaction(BaseCassEngTestCase): @classmethod @@ -94,4 +98,4 @@ def test_batch_update_transaction(self): self.assertRaises(LWTException, b.execute) updated = TestTransactionModel.objects(id=id).first() - self.assertEqual(updated.text, 'something else') \ No newline at end of file + self.assertEqual(updated.text, 'something else') From 26d2471390424c38d06e59877b2cf1d43c23eebc Mon Sep 17 00:00:00 2001 From: Amy Hanlon Date: Mon, 12 Jan 2015 22:03:53 -0500 Subject: [PATCH 1130/3726] add amy to authors --- AUTHORS | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/AUTHORS b/AUTHORS index 2da11977a5..8e03f7f80b 100644 --- a/AUTHORS +++ b/AUTHORS @@ -13,4 +13,5 @@ Michael Hall Netanel Cohen-Tzemach Mariusz Kryński Greg Doermann -@pandu-rao \ No newline at end of file +@pandu-rao +Amy Hanlon From 46315504e567b8e8f24dffade22e3867fb5b6cf3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 12 Jan 2015 17:46:52 -0600 Subject: [PATCH 1131/3726] Add idle connection heartbeats. 
PYTHON-197 --- cassandra/cluster.py | 46 ++++++++++++++---- cassandra/connection.py | 102 +++++++++++++++++++++++++++++++++++++++- cassandra/pool.py | 7 +++ 3 files changed, 145 insertions(+), 10 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 0822e2a285..e8a99ebd8c 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -39,12 +39,13 @@ from cassandra.util import WeakSet # NOQA from functools import partial, wraps -from itertools import groupby +from itertools import groupby, chain from cassandra import (ConsistencyLevel, AuthenticationFailed, InvalidRequest, OperationTimedOut, UnsupportedOperation, Unauthorized) -from cassandra.connection import ConnectionException, ConnectionShutdown +from cassandra.connection import (ConnectionException, ConnectionShutdown, + ConnectionHeartbeat) from cassandra.encoder import Encoder from cassandra.protocol import (QueryMessage, ResultMessage, ErrorMessage, ReadTimeoutErrorMessage, @@ -372,6 +373,14 @@ def auth_provider(self, value): If set to :const:`None`, there will be no timeout for these queries. """ + idle_heartbeat_interval = 30 + """ + Interval, in seconds, on which to heartbeat idle connections. This helps + keep connections open through network devices that expire idle connections. + It also helps discover bad connections early in low-traffic scenarios. + Setting to zero disables heartbeats. + """ + sessions = None control_connection = None scheduler = None @@ -380,6 +389,7 @@ def auth_provider(self, value): _is_setup = False _prepared_statements = None _prepared_statement_lock = None + _idle_heartbeat = None _user_types = None """ @@ -406,7 +416,8 @@ def __init__(self, protocol_version=2, executor_threads=2, max_schema_agreement_wait=10, - control_connection_timeout=2.0): + control_connection_timeout=2.0, + idle_heartbeat_interval=30): """ Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. 
@@ -456,6 +467,7 @@ def __init__(self, self.cql_version = cql_version self.max_schema_agreement_wait = max_schema_agreement_wait self.control_connection_timeout = control_connection_timeout + self.idle_heartbeat_interval = idle_heartbeat_interval self._listeners = set() self._listener_lock = Lock() @@ -700,6 +712,8 @@ def connect(self, keyspace=None): self.load_balancing_policy.check_supported() + if self.idle_heartbeat_interval: + self._idle_heartbeat = ConnectionHeartbeat(self.idle_heartbeat_interval, self.get_connection_holders) self._is_setup = True session = self._new_session() @@ -707,6 +721,13 @@ def connect(self, keyspace=None): session.set_keyspace(keyspace) return session + def get_connection_holders(self): + holders = [] + for s in self.sessions: + holders.extend(s.get_pools()) + holders.append(self.control_connection) + return holders + def shutdown(self): """ Closes all sessions and connection associated with this Cluster. @@ -734,6 +755,9 @@ def shutdown(self): if self.executor: self.executor.shutdown() + if self._idle_heartbeat: + self._idle_heartbeat.stop() + def _new_session(self): session = Session(self, self.metadata.all_hosts()) for keyspace, type_map in six.iteritems(self._user_types): @@ -1656,6 +1680,9 @@ def submit(self, fn, *args, **kwargs): def get_pool_state(self): return dict((host, pool.get_state()) for host, pool in self._pools.items()) + def get_pools(self): + return self._pools.values() + class UserTypeDoesNotExist(Exception): """ @@ -2271,11 +2298,6 @@ def _signal_error(self): # manually self.reconnect() - @property - def is_open(self): - conn = self._connection - return bool(conn and conn.is_open) - def on_up(self, host): pass @@ -2295,6 +2317,14 @@ def on_add(self, host): def on_remove(self, host): self.refresh_node_list_and_token_map(force_token_rebuild=True) + def get_connections(self): + c = getattr(self, '_connection', None) + return [c] if c else [] + + def return_connection(self, connection): + if connection is 
self._connection and (connection.is_defunct or connection.is_closed): + self.reconnect() + def _stop_scheduler(scheduler, thread): try: diff --git a/cassandra/connection.py b/cassandra/connection.py index 5a58793cb7..fa7e72743d 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -20,7 +20,7 @@ import logging import os import sys -from threading import Event, RLock +from threading import Thread, Event, RLock import time if 'gevent.monkey' in sys.modules: @@ -159,7 +159,7 @@ class Connection(object): in_flight = 0 # A set of available request IDs. When using the v3 protocol or higher, - # this will no initially include all request IDs in order to save memory, + # this will not initially include all request IDs in order to save memory, # but the set will grow if it is exhausted. request_ids = None @@ -172,6 +172,8 @@ class Connection(object): lock = None user_type_map = None + msg_received = False + is_control_connection = False _iobuf = None @@ -401,6 +403,8 @@ def process_msg(self, msg, body_len): with self.lock: self.request_ids.append(stream_id) + self.msg_received = True + body = None try: # check that the protocol version is supported @@ -673,6 +677,13 @@ def process_result(result): self.send_msg(query, request_id, process_result) + @property + def is_idle(self): + return self.in_flight == 0 and not self.msg_received + + def reset_idle(self): + self.msg_received = False + def __str__(self): status = "" if self.is_defunct: @@ -732,3 +743,90 @@ def deliver(self, timeout=None): raise OperationTimedOut() else: return self.responses + + +class HeartbeatFuture(object): + def __init__(self, connection, owner): + self._exception = None + self._event = Event() + self.connection = connection + self.owner = owner + log.debug("Sending options message heartbeat on idle connection %s %s", + id(connection), connection.host) + with connection.lock: + connection.send_msg(OptionsMessage(), connection.get_request_id(), self._options_callback) + 
connection.in_flight += 1 + + def wait(self, timeout): + if self._event.wait(timeout): + if self._exception: + raise self._exception + else: + raise OperationTimedOut() + + def _options_callback(self, response): + if not isinstance(response, SupportedMessage): + if isinstance(response, ConnectionException): + self._exception = response + else: + self._exception = ConnectionException("Received unexpected response to OptionsMessage: %s" + % (response,)) + + log.debug("Received options response on connection (%s) from %s", + id(self.connection), self.connection.host) + self._event.set() + + +class ConnectionHeartbeat(Thread): + + def __init__(self, interval_sec, get_connection_holders): + Thread.__init__(self, name="Connection heartbeat") + self._interval = interval_sec + self._get_connection_holders = get_connection_holders + self._shutdown_event = Event() + self.daemon = True + self.start() + + def run(self): + elapsed = 0 + while not self._shutdown_event.wait(self._interval - elapsed): + start_time = time.time() + + futures = [] + failed_connections = [] + try: + for connections, owner in [(o.get_connections(), o) for o in self._get_connection_holders()]: + for connection in connections: + if not (connection.is_defunct or connection.is_closed) and connection.is_idle: + try: + futures.append(HeartbeatFuture(connection, owner)) + except Exception: + log.warning("Failed sending heartbeat message on connection (%s) to %s", + id(connection), connection.host, exc_info=True) + failed_connections.append((connection, owner)) + else: + connection.reset_idle() + + for f in futures: + connection = f.connection + try: + f.wait(self._interval) + # TODO: move this, along with connection locks in pool, down into Connection + with connection.lock: + connection.in_flight -= 1 + connection.reset_idle() + except Exception: + log.warning("Heartbeat failed for connection (%s) to %s", + id(connection), connection.host, exc_info=True) + failed_connections.append((f.connection, f.owner)) + 
+ for connection, owner in failed_connections: + connection.defunct(Exception('Connection heartbeat failure')) + owner.return_connection(connection) + except Exception: + log.warning("Failed connection heartbeat", exc_info=True) + + elapsed = time.time() - start_time + + def stop(self): + self._shutdown_event.set() diff --git a/cassandra/pool.py b/cassandra/pool.py index 587fa27753..4331be43eb 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -360,6 +360,10 @@ def connection_finished_setting_keyspace(conn, error): self._connection.set_keyspace_async(keyspace, connection_finished_setting_keyspace) + def get_connections(self): + c = self._connection + return [c] if c else [] + def get_state(self): have_conn = self._connection is not None in_flight = self._connection.in_flight if have_conn else 0 @@ -693,6 +697,9 @@ def connection_finished_setting_keyspace(conn, error): for conn in self._connections: conn.set_keyspace_async(keyspace, connection_finished_setting_keyspace) + def get_connections(self): + return self._connections + def get_state(self): in_flights = ", ".join([str(c.in_flight) for c in self._connections]) return "shutdown: %s, open_count: %d, in_flights: %s" % (self.is_shutdown, self.open_count, in_flights) From d2f3875599ccb702070c2fdfcf602b3e1b795cc9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 12 Jan 2015 17:59:16 -0600 Subject: [PATCH 1132/3726] cleanup; Remove unneeded member checks in Cluster --- cassandra/cluster.py | 31 +++++++++++++------------------ 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index e8a99ebd8c..e4b51ab69e 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -700,15 +700,14 @@ def connect(self, keyspace=None): self.load_balancing_policy.populate( weakref.proxy(self), self.metadata.all_hosts()) - if self.control_connection: - try: - self.control_connection.connect() - log.debug("Control connection created") - except Exception: - 
log.exception("Control connection failed to connect, " - "shutting down Cluster:") - self.shutdown() - raise + try: + self.control_connection.connect() + log.debug("Control connection created") + except Exception: + log.exception("Control connection failed to connect, " + "shutting down Cluster:") + self.shutdown() + raise self.load_balancing_policy.check_supported() @@ -742,18 +741,14 @@ def shutdown(self): else: self.is_shutdown = True - if self.scheduler: - self.scheduler.shutdown() + self.scheduler.shutdown() - if self.control_connection: - self.control_connection.shutdown() + self.control_connection.shutdown() - if self.sessions: - for session in self.sessions: - session.shutdown() + for session in self.sessions: + session.shutdown() - if self.executor: - self.executor.shutdown() + self.executor.shutdown() if self._idle_heartbeat: self._idle_heartbeat.stop() From 8562fd0e6848d8c117af96e77ae9564e9c0e53dd Mon Sep 17 00:00:00 2001 From: Joshua McKenzie Date: Sun, 14 Sep 2014 12:35:15 -0700 Subject: [PATCH 1133/3726] adding date and time types for CASSANDRA-7523 --- cassandra/cqltypes.py | 99 +++++++++++++++++++++++- cassandra/protocol.py | 5 +- tests/integration/standard/test_types.py | 54 +++++++------ tests/unit/test_marshalling.py | 2 + tests/unit/test_types.py | 63 ++++++++++++++- 5 files changed, 195 insertions(+), 28 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 15169621cc..9d375d4a02 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -43,7 +43,8 @@ import six from six.moves import range -from cassandra.marshal import (int8_pack, int8_unpack, uint16_pack, uint16_unpack, +from cassandra.marshal import (int8_pack, int8_unpack, + uint16_pack, uint16_unpack, uint32_pack, uint32_unpack, int32_pack, int32_unpack, int64_pack, int64_unpack, float_pack, float_unpack, double_pack, double_unpack, varint_pack, varint_unpack) @@ -54,9 +55,13 @@ if six.PY3: _number_types = frozenset((int, float)) + _time_types = 
frozenset((int)) + _date_types = frozenset((int)) long = int else: _number_types = frozenset((int, long, float)) + _time_types = frozenset((int, long)) + _date_types = frozenset((int, long)) def trim_if_startswith(s, prefix): @@ -523,8 +528,7 @@ def serialize(addr, protocol_version): class CounterColumnType(LongType): typename = 'counter' - -cql_time_formats = ( +cql_timestamp_formats = ( '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S', '%Y-%m-%dT%H:%M', @@ -551,7 +555,7 @@ def interpret_datestring(date): date = date[:-5] else: offset = -time.timezone - for tformat in cql_time_formats: + for tformat in cql_timestamp_formats: try: tval = time.strptime(date, tformat) except ValueError: @@ -625,6 +629,93 @@ def serialize(timeuuid, protocol_version): raise TypeError("Got a non-UUID object for a UUID value") +class SimpleDateType(_CassandraType): + typename = 'date' + seconds_per_day = 60 * 60 * 24 + date_format = "%Y-%m-%d" + + @classmethod + def validate(cls, date): + if isinstance(date, basestring): + date = cls.interpret_simpledate_string(date) + return date + + @staticmethod + def interpret_simpledate_string(v): + try: + tval = time.strptime(v, SimpleDateType.date_format) + # shift upward w/epoch at 2**31 + return (calendar.timegm(tval) / SimpleDateType.seconds_per_day) + 2**31 + except TypeError: + # Ints are valid dates too + if type(v) not in _date_types: + raise TypeError('Date arguments must be an unsigned integer or string in the format YYYY-MM-DD') + return v + + @staticmethod + def serialize(val, protocol_version): + date_val = SimpleDateType.interpret_simpledate_string(val) + return uint32_pack(date_val) + + @staticmethod + def deserialize(byts, protocol_version): + Result = namedtuple('SimpleDate', 'value') + return Result(value=uint32_unpack(byts)) + + +class TimeType(_CassandraType): + typename = 'time' + ONE_MICRO=1000 + ONE_MILLI=1000*ONE_MICRO + ONE_SECOND=1000*ONE_MILLI + ONE_MINUTE=60*ONE_SECOND + ONE_HOUR=60*ONE_MINUTE + + @classmethod + def validate(cls, 
val): + if isinstance(val, basestring): + time = cls.interpret_timestring(val) + return time + + @staticmethod + def interpret_timestring(val): + try: + nano = 0 + try: + base_time_str = val + if '.' in base_time_str: + base_time_str = val[0:val.find('.')] + base_time = time.strptime(base_time_str, "%H:%M:%S") + nano = base_time.tm_hour * TimeType.ONE_HOUR + nano += base_time.tm_min * TimeType.ONE_MINUTE + nano += base_time.tm_sec * TimeType.ONE_SECOND + + if '.' in val: + nano_time_str = val[val.find('.')+1:] + # right pad to 9 digits + while len(nano_time_str) < 9: + nano_time_str += "0" + nano += int(nano_time_str) + + except AttributeError as e: + if type(val) not in _time_types: + raise TypeError('TimeType arguments must be a string or whole number') + # long / int values passed in are acceptable too + nano = val + return nano + except ValueError as e: + raise ValueError("can't interpret %r as a time" % (val,)) + + @staticmethod + def serialize(val, protocol_version): + return int64_pack(TimeType.interpret_timestring(val)) + + @staticmethod + def deserialize(byts, protocol_version): + Result = namedtuple('Time', 'value') + return Result(value=int64_unpack(byts)) + + class UTF8Type(_CassandraType): typename = 'text' empty_binary_ok = True diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 4ac65f5acb..00239f1cb1 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -33,7 +33,8 @@ InetAddressType, IntegerType, ListType, LongType, MapType, SetType, TimeUUIDType, UTF8Type, UUIDType, UserType, - TupleType, lookup_casstype) + TupleType, lookup_casstype, SimpleDateType, + TimeType) from cassandra.policies import WriteType log = logging.getLogger(__name__) @@ -530,6 +531,8 @@ class ResultMessage(_MessageType): 0x000E: IntegerType, 0x000F: TimeUUIDType, 0x0010: InetAddressType, + 0x0011: SimpleDateType, + 0x0012: TimeType, 0x0020: ListType, 0x0021: MapType, 0x0022: SetType, diff --git a/tests/integration/standard/test_types.py 
b/tests/integration/standard/test_types.py index fb985fa7dc..44978e67dc 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -31,6 +31,7 @@ from cassandra.cqltypes import Int32Type, EMPTY from cassandra.query import dict_factory from cassandra.util import OrderedDict, sortedset +from collections import namedtuple from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION @@ -151,6 +152,8 @@ def test_blob_type_as_bytearray(self): r timeuuid, s varchar, t varint, + u date, + v time, PRIMARY KEY (a, b) ) """ @@ -188,9 +191,14 @@ def test_basic_types(self): v4_uuid, # uuid v1_uuid, # timeuuid u"sometext\u1234", # varchar - 123456789123456789123456789 # varint + 123456789123456789123456789, # varint + '2014-01-01', # date + '01:02:03.456789012' # time ] + SimpleDate = namedtuple('SimpleDate', 'value') + Time = namedtuple('Time', 'value') + expected_vals = ( "sometext", "sometext", @@ -210,12 +218,14 @@ def test_basic_types(self): v4_uuid, # uuid v1_uuid, # timeuuid u"sometext\u1234", # varchar - 123456789123456789123456789 # varint + 123456789123456789123456789, # varint + SimpleDate(2147499719), # date + Time(3723456789012) # time ) s.execute(""" - INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) - VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) + INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) """, params) results = s.execute("SELECT * FROM mytable") @@ -225,8 +235,8 @@ def test_basic_types(self): # try the same thing with a prepared statement prepared = s.prepare(""" - INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
+ INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) """) s.execute(prepared.bind(params)) @@ -238,7 +248,7 @@ def test_basic_types(self): # query with prepared statement prepared = s.prepare(""" - SELECT a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t FROM mytable + SELECT a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v FROM mytable """) results = s.execute(prepared.bind(())) @@ -265,12 +275,12 @@ def test_empty_strings_and_nones(self): s.execute("INSERT INTO mytable (a, b) VALUES ('a', 'b')") s.row_factory = dict_factory results = s.execute(""" - SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t FROM mytable + SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v FROM mytable """) self.assertTrue(all(x is None for x in results[0].values())) prepared = s.prepare(""" - SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t FROM mytable + SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v FROM mytable """) results = s.execute(prepared.bind(())) self.assertTrue(all(x is None for x in results[0].values())) @@ -309,48 +319,48 @@ def test_empty_strings_and_nones(self): # insert values for all columns values = ['a', 'b', 'a', 1, True, Decimal('1.0'), 0.1, 0.1, "1.2.3.4", 1, ['a'], set([1]), {'a': 1}, 'a', - datetime.now(), uuid4(), uuid1(), 'a', 1] + datetime.now(), uuid4(), uuid1(), 'a', 1, '2014-01-01', '01:02:03.456789012'] s.execute(""" - INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) - VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) + INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) """, values) # then insert None, which should null them out null_values = values[:2] + ([None] * (len(values) - 2)) s.execute(""" - INSERT 
INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) - VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) + INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) """, null_values) results = s.execute(""" - SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t FROM mytable + SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v FROM mytable """) self.assertEqual([], [(name, val) for (name, val) in results[0].items() if val is not None]) prepared = s.prepare(""" - SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t FROM mytable + SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v FROM mytable """) results = s.execute(prepared.bind(())) self.assertEqual([], [(name, val) for (name, val) in results[0].items() if val is not None]) # do the same thing again, but use a prepared statement to insert the nulls s.execute(""" - INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) - VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) + INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) """, values) prepared = s.prepare(""" - INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
""") s.execute(prepared, null_values) results = s.execute(""" - SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t FROM mytable + SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v FROM mytable """) self.assertEqual([], [(name, val) for (name, val) in results[0].items() if val is not None]) prepared = s.prepare(""" - SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t FROM mytable + SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v FROM mytable """) results = s.execute(prepared.bind(())) self.assertEqual([], [(name, val) for (name, val) in results[0].items() if val is not None]) diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index 05239b4798..9eb4de695a 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -79,6 +79,8 @@ (b'\x00\x00', 'ListType(FloatType)', []), (b'\x00\x00', 'SetType(IntegerType)', sortedset()), (b'\x00\x01\x00\x10\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0', 'ListType(TimeUUIDType)', [UUID(bytes=b'\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0')]), + (b'\x00\x00>\xc7', 'SimpleDateType', '2014-01-01'), + (b'\x00\x00\x00\x00\x00\x00\x00\x01', 'TimeType', '00:00:00.000000001') ) ordered_dict_value = OrderedDict() diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 5758c508aa..28f7b63ade 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -19,12 +19,14 @@ import unittest # noqa from binascii import unhexlify +import calendar import datetime import time import cassandra from cassandra.cqltypes import (BooleanType, lookup_casstype_simple, lookup_casstype, LongType, DecimalType, SetType, cql_typename, CassandraType, UTF8Type, parse_casstype_args, + SimpleDateType, TimeType, EmptyValue, _CassandraType, DateType, int64_pack) from cassandra.query import named_tuple_factory from cassandra.protocol import (write_string, read_longstring, write_stringmap, @@ -52,7 +54,9 @@ def test_lookup_casstype_simple(self): 
self.assertEqual(lookup_casstype_simple('Int32Type'), cassandra.cqltypes.Int32Type) self.assertEqual(lookup_casstype_simple('UTF8Type'), cassandra.cqltypes.UTF8Type) self.assertEqual(lookup_casstype_simple('DateType'), cassandra.cqltypes.DateType) + self.assertEqual(lookup_casstype_simple('SimpleDateType'), cassandra.cqltypes.SimpleDateType) self.assertEqual(lookup_casstype_simple('TimeUUIDType'), cassandra.cqltypes.TimeUUIDType) + self.assertEqual(lookup_casstype_simple('TimeType'), cassandra.cqltypes.TimeType) self.assertEqual(lookup_casstype_simple('UUIDType'), cassandra.cqltypes.UUIDType) self.assertEqual(lookup_casstype_simple('IntegerType'), cassandra.cqltypes.IntegerType) self.assertEqual(lookup_casstype_simple('MapType'), cassandra.cqltypes.MapType) @@ -74,6 +78,7 @@ def test_lookup_casstype(self): self.assertEqual(lookup_casstype('BytesType'), cassandra.cqltypes.BytesType) self.assertEqual(lookup_casstype('BooleanType'), cassandra.cqltypes.BooleanType) self.assertEqual(lookup_casstype('CounterColumnType'), cassandra.cqltypes.CounterColumnType) + self.assertEqual(lookup_casstype('DateType'), cassandra.cqltypes.DateType) self.assertEqual(lookup_casstype('DecimalType'), cassandra.cqltypes.DecimalType) self.assertEqual(lookup_casstype('DoubleType'), cassandra.cqltypes.DoubleType) self.assertEqual(lookup_casstype('FloatType'), cassandra.cqltypes.FloatType) @@ -81,6 +86,7 @@ def test_lookup_casstype(self): self.assertEqual(lookup_casstype('Int32Type'), cassandra.cqltypes.Int32Type) self.assertEqual(lookup_casstype('UTF8Type'), cassandra.cqltypes.UTF8Type) self.assertEqual(lookup_casstype('DateType'), cassandra.cqltypes.DateType) + self.assertEqual(lookup_casstype('TimeType'), cassandra.cqltypes.TimeType) self.assertEqual(lookup_casstype('TimeUUIDType'), cassandra.cqltypes.TimeUUIDType) self.assertEqual(lookup_casstype('UUIDType'), cassandra.cqltypes.UUIDType) self.assertEqual(lookup_casstype('IntegerType'), cassandra.cqltypes.IntegerType) @@ -114,10 +120,65 @@ 
def test_casstype_parameterized(self): def test_datetype_from_string(self): # Ensure all formats can be parsed, without exception - for format in cassandra.cqltypes.cql_time_formats: + for format in cassandra.cqltypes.cql_timestamp_formats: date_string = str(datetime.datetime.now().strftime(format)) cassandra.cqltypes.DateType(date_string) + def test_simpledate(self): + """ + Test cassandra.cqltypes.SimpleDateType() construction + """ + + nd = SimpleDateType.interpret_simpledate_string('2014-01-01') + tval = time.strptime('2014-01-01', SimpleDateType.date_format) + manual = calendar.timegm(tval) / SimpleDateType.seconds_per_day + self.assertEqual(nd, manual) + + nd = SimpleDateType.interpret_simpledate_string('1970-01-01') + self.assertEqual(nd, 0) + + def test_time(self): + """ + Test cassandra.cqltypes.TimeType() construction + """ + + one_micro = 1000 + one_milli = 1000L*one_micro + one_second = 1000L*one_milli + one_minute = 60L*one_second + one_hour = 60L*one_minute + + nd = TimeType.interpret_timestring('00:00:00.000000001') + self.assertEqual(nd, 1) + nd = TimeType.interpret_timestring('00:00:00.000001') + self.assertEqual(nd, one_micro) + nd = TimeType.interpret_timestring('00:00:00.001') + self.assertEqual(nd, one_milli) + nd = TimeType.interpret_timestring('00:00:01') + self.assertEqual(nd, one_second) + nd = TimeType.interpret_timestring('00:01:00') + self.assertEqual(nd, one_minute) + nd = TimeType.interpret_timestring('01:00:00') + self.assertEqual(nd, one_hour) + + nd = TimeType('23:59:59.1') + nd = TimeType('23:59:59.12') + nd = TimeType('23:59:59.123') + nd = TimeType('23:59:59.1234') + nd = TimeType('23:59:59.12345') + + nd = TimeType.interpret_timestring('23:59:59.123456') + self.assertEquals(nd, 23*one_hour + 59*one_minute + 59*one_second + 123*one_milli + 456*one_micro) + + nd = TimeType.interpret_timestring('23:59:59.1234567') + self.assertEquals(nd, 23*one_hour + 59*one_minute + 59*one_second + 123*one_milli + 456*one_micro + 700) + + nd = 
TimeType.interpret_timestring('23:59:59.12345678') + self.assertEquals(nd, 23*one_hour + 59*one_minute + 59*one_second + 123*one_milli + 456*one_micro + 780) + + nd = TimeType.interpret_timestring('23:59:59.123456789') + self.assertEquals(nd, 23*one_hour + 59*one_minute + 59*one_second + 123*one_milli + 456*one_micro + 789) + def test_cql_typename(self): """ Smoke test cql_typename From 4bb8b313c0d684b286904cbe4ac962fdf73b5dd1 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Wed, 14 Jan 2015 08:35:37 -0800 Subject: [PATCH 1134/3726] made create_keyspace more sane, fixed broken tests. create_keyspace no longer has defaults for SimpleStrategy and replication factor. changed tests to use the default keyspace, cqlengine_test for all tests. Not sure why other keyspaces were being used. --- cqlengine/management.py | 7 ++-- cqlengine/tests/__init__.py | 5 +++ .../tests/columns/test_container_columns.py | 8 ++--- .../tests/columns/test_counter_column.py | 2 +- cqlengine/tests/columns/test_static_column.py | 14 ++++---- cqlengine/tests/columns/test_validation.py | 24 ++++++------- cqlengine/tests/columns/test_value_io.py | 2 +- .../management/test_compaction_settings.py | 20 +++++------ cqlengine/tests/management/test_management.py | 18 +++++----- .../tests/model/test_class_construction.py | 34 +++++++++---------- .../tests/model/test_equality_operations.py | 2 +- cqlengine/tests/model/test_model.py | 10 +++--- cqlengine/tests/model/test_model_io.py | 14 ++++---- cqlengine/tests/model/test_polymorphism.py | 16 ++++----- cqlengine/tests/model/test_updates.py | 2 +- cqlengine/tests/model/test_value_lists.py | 4 +-- cqlengine/tests/query/test_batch_query.py | 6 ++-- .../tests/query/test_datetime_queries.py | 2 +- cqlengine/tests/query/test_queryoperators.py | 4 +-- cqlengine/tests/query/test_queryset.py | 10 +++--- cqlengine/tests/query/test_updates.py | 2 +- cqlengine/tests/test_batch_query.py | 2 +- cqlengine/tests/test_consistency.py | 2 +- 
cqlengine/tests/test_ifnotexists.py | 6 +--- cqlengine/tests/test_load.py | 2 +- cqlengine/tests/test_timestamp.py | 1 - cqlengine/tests/test_transaction.py | 1 - cqlengine/tests/test_ttl.py | 4 +-- 28 files changed, 109 insertions(+), 115 deletions(-) diff --git a/cqlengine/management.py b/cqlengine/management.py index 124ae8ace1..a6ba47bc81 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -18,7 +18,7 @@ # system keyspaces schema_columnfamilies = NamedTable('system', 'schema_columnfamilies') -def create_keyspace(name, strategy_class='SimpleStrategy', replication_factor=3, durable_writes=True, **replication_values): +def create_keyspace(name, strategy_class, replication_factor, durable_writes=True, **replication_values): """ creates a keyspace @@ -62,7 +62,7 @@ def delete_keyspace(name): def create_table(model, create_missing_keyspace=True): raise CQLEngineException("create_table is deprecated, please use sync_table") -def sync_table(model, create_missing_keyspace=True): +def sync_table(model): """ Inspects the model and creates / updates the corresponding table and columns. 
@@ -86,9 +86,6 @@ def sync_table(model, create_missing_keyspace=True): raw_cf_name = model.column_family_name(include_keyspace=False) ks_name = model._get_keyspace() - #create missing keyspace - if create_missing_keyspace: - create_keyspace(ks_name) cluster = get_cluster() diff --git a/cqlengine/tests/__init__.py b/cqlengine/tests/__init__.py index 87cb239575..e03842d32b 100644 --- a/cqlengine/tests/__init__.py +++ b/cqlengine/tests/__init__.py @@ -1,6 +1,9 @@ import os + from cqlengine import connection +from cqlengine.management import create_keyspace + def setup_package(): try: @@ -23,3 +26,5 @@ def setup_package(): connection.setup([CASSANDRA_TEST_HOST], protocol_version=protocol_version, default_keyspace='cqlengine_test') + + create_keyspace("cqlengine_test", replication_factor=1, strategy_class="SimpleStrategy") diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index 03cbd7452f..f26962ed27 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -10,7 +10,7 @@ class TestSetModel(Model): - __keyspace__ = 'test' + partition = columns.UUID(primary_key=True, default=uuid4) int_set = columns.Set(columns.Integer, required=False) @@ -163,7 +163,7 @@ def test_default_empty_container_saving(self): class TestListModel(Model): - __keyspace__ = 'test' + partition = columns.UUID(primary_key=True, default=uuid4) int_list = columns.List(columns.Integer, required=False) @@ -309,7 +309,7 @@ def test_blind_list_updates_from_none(self): assert m3.int_list == [] class TestMapModel(Model): - __keyspace__ = 'test' + partition = columns.UUID(primary_key=True, default=uuid4) int_map = columns.Map(columns.Integer, columns.UUID, required=False) @@ -509,7 +509,7 @@ def test_default_empty_container_saving(self): class TestCamelMapModel(Model): - __keyspace__ = 'test' + partition = columns.UUID(primary_key=True, default=uuid4) camelMap = 
columns.Map(columns.Text, columns.Integer, required=False) diff --git a/cqlengine/tests/columns/test_counter_column.py b/cqlengine/tests/columns/test_counter_column.py index 8e8b2361e4..964cacb8db 100644 --- a/cqlengine/tests/columns/test_counter_column.py +++ b/cqlengine/tests/columns/test_counter_column.py @@ -8,7 +8,7 @@ class TestCounterModel(Model): - __keyspace__ = 'test' + partition = columns.UUID(primary_key=True, default=uuid4) cluster = columns.UUID(primary_key=True, default=uuid4) counter = columns.Counter() diff --git a/cqlengine/tests/columns/test_static_column.py b/cqlengine/tests/columns/test_static_column.py index e8cfdedb9a..d25c0cf0f9 100644 --- a/cqlengine/tests/columns/test_static_column.py +++ b/cqlengine/tests/columns/test_static_column.py @@ -9,7 +9,7 @@ class TestStaticModel(Model): - __keyspace__ = 'test' + partition = columns.UUID(primary_key=True, default=uuid4) cluster = columns.UUID(primary_key=True, default=uuid4) static = columns.Text(static=True) @@ -35,15 +35,15 @@ def test_mixed_updates(self): """ Tests that updates on both static and non-static columns work as intended """ instance = TestStaticModel.create() instance.static = "it's shared" - instance.text = "some text" + instance.text = "some text" instance.save() - + u = TestStaticModel.get(partition=instance.partition) u.static = "it's still shared" u.text = "another text" u.update() actual = TestStaticModel.get(partition=u.partition) - + assert actual.static == "it's still shared" @skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") @@ -51,12 +51,12 @@ def test_static_only_updates(self): """ Tests that updates on static only column work as intended """ instance = TestStaticModel.create() instance.static = "it's shared" - instance.text = "some text" + instance.text = "some text" instance.save() - + u = TestStaticModel.get(partition=instance.partition) u.static = "it's still shared" u.update() - actual = TestStaticModel.get(partition=u.partition) + 
actual = TestStaticModel.get(partition=u.partition) assert actual.static == "it's still shared" diff --git a/cqlengine/tests/columns/test_validation.py b/cqlengine/tests/columns/test_validation.py index d665237c29..a4a939e854 100644 --- a/cqlengine/tests/columns/test_validation.py +++ b/cqlengine/tests/columns/test_validation.py @@ -33,7 +33,7 @@ class TestDatetime(BaseCassEngTestCase): class DatetimeTest(Model): - __keyspace__ = 'test' + test_id = Integer(primary_key=True) created_at = DateTime() @@ -82,7 +82,7 @@ def test_datetime_none(self): class TestBoolDefault(BaseCassEngTestCase): class BoolDefaultValueTest(Model): - __keyspace__ = 'test' + test_id = Integer(primary_key=True) stuff = Boolean(default=True) @@ -99,7 +99,7 @@ def test_default_is_set(self): class TestBoolValidation(BaseCassEngTestCase): class BoolValidationTest(Model): - __keyspace__ = 'test' + test_id = Integer(primary_key=True) bool_column = Boolean() @@ -116,7 +116,7 @@ def test_validation_preserves_none(self): class TestVarInt(BaseCassEngTestCase): class VarIntTest(Model): - __keyspace__ = 'test' + test_id = Integer(primary_key=True) bignum = VarInt(primary_key=True) @@ -141,7 +141,7 @@ def test_varint_io(self): class TestDate(BaseCassEngTestCase): class DateTest(Model): - __keyspace__ = 'test' + test_id = Integer(primary_key=True) created_at = Date() @@ -180,7 +180,7 @@ def test_date_none(self): class TestDecimal(BaseCassEngTestCase): class DecimalTest(Model): - __keyspace__ = 'test' + test_id = Integer(primary_key=True) dec_val = Decimal() @@ -205,7 +205,7 @@ def test_decimal_io(self): class TestUUID(BaseCassEngTestCase): class UUIDTest(Model): - __keyspace__ = 'test' + test_id = Integer(primary_key=True) a_uuid = UUID(default=uuid4()) @@ -233,7 +233,7 @@ def test_uuid_str_no_dashes(self): class TestTimeUUID(BaseCassEngTestCase): class TimeUUIDTest(Model): - __keyspace__ = 'test' + test_id = Integer(primary_key=True) timeuuid = TimeUUID(default=uuid1()) @@ -259,7 +259,7 @@ def 
test_timeuuid_io(self): class TestInteger(BaseCassEngTestCase): class IntegerTest(Model): - __keyspace__ = 'test' + test_id = UUID(primary_key=True, default=lambda:uuid4()) value = Integer(default=0, required=True) @@ -270,7 +270,7 @@ def test_default_zero_fields_validate(self): class TestBigInt(BaseCassEngTestCase): class BigIntTest(Model): - __keyspace__ = 'test' + test_id = UUID(primary_key=True, default=lambda:uuid4()) value = BigInt(default=0, required=True) @@ -328,7 +328,7 @@ def test_non_required_validation(self): class TestExtraFieldsRaiseException(BaseCassEngTestCase): class TestModel(Model): - __keyspace__ = 'test' + id = UUID(primary_key=True, default=uuid4) def test_extra_field(self): @@ -337,7 +337,7 @@ def test_extra_field(self): class TestPythonDoesntDieWhenExtraFieldIsInCassandra(BaseCassEngTestCase): class TestModel(Model): - __keyspace__ = 'test' + __table_name__ = 'alter_doesnt_break_running_app' id = UUID(primary_key=True, default=uuid4) diff --git a/cqlengine/tests/columns/test_value_io.py b/cqlengine/tests/columns/test_value_io.py index b7587afa31..d3b589f029 100644 --- a/cqlengine/tests/columns/test_value_io.py +++ b/cqlengine/tests/columns/test_value_io.py @@ -41,7 +41,7 @@ def setUpClass(cls): # create a table with the given column class IOTestModel(Model): - __keyspace__ = 'test' + table_name = cls.column.db_type + "_io_test_model_{}".format(uuid4().hex[:8]) pkey = cls.column(primary_key=True) data = cls.column() diff --git a/cqlengine/tests/management/test_compaction_settings.py b/cqlengine/tests/management/test_compaction_settings.py index 2cad5d554a..9607334e7d 100644 --- a/cqlengine/tests/management/test_compaction_settings.py +++ b/cqlengine/tests/management/test_compaction_settings.py @@ -9,7 +9,7 @@ class CompactionModel(Model): - __keyspace__ = 'test' + __compaction__ = None cid = columns.UUID(primary_key=True) name = columns.Text() @@ -74,7 +74,7 @@ def test_sstable_size_in_mb(self): class LeveledcompactionTestTable(Model): - 
__keyspace__ = 'test' + __compaction__ = LeveledCompactionStrategy __compaction_sstable_size_in_mb__ = 64 @@ -96,7 +96,7 @@ def test_compaction_not_altered_without_changes_leveled(self): from cqlengine.management import update_compaction class LeveledCompactionChangesDetectionTest(Model): - __keyspace__ = 'test' + __compaction__ = LeveledCompactionStrategy __compaction_sstable_size_in_mb__ = 160 __compaction_tombstone_threshold__ = 0.125 @@ -113,7 +113,7 @@ def test_compaction_not_altered_without_changes_sizetiered(self): from cqlengine.management import update_compaction class SizeTieredCompactionChangesDetectionTest(Model): - __keyspace__ = 'test' + __compaction__ = SizeTieredCompactionStrategy __compaction_bucket_high__ = 20 __compaction_bucket_low__ = 10 @@ -146,7 +146,7 @@ def test_alter_actually_alters(self): def test_alter_options(self): class AlterTable(Model): - __keyspace__ = 'test' + __compaction__ = LeveledCompactionStrategy __compaction_sstable_size_in_mb__ = 64 @@ -163,7 +163,7 @@ class AlterTable(Model): class EmptyCompactionTest(BaseCassEngTestCase): def test_empty_compaction(self): class EmptyCompactionModel(Model): - __keyspace__ = 'test' + __compaction__ = None cid = columns.UUID(primary_key=True) name = columns.Text() @@ -173,14 +173,14 @@ class EmptyCompactionModel(Model): class CompactionLeveledStrategyModel(Model): - __keyspace__ = 'test' + __compaction__ = LeveledCompactionStrategy cid = columns.UUID(primary_key=True) name = columns.Text() class CompactionSizeTieredModel(Model): - __keyspace__ = 'test' + __compaction__ = SizeTieredCompactionStrategy cid = columns.UUID(primary_key=True) name = columns.Text() @@ -191,7 +191,7 @@ class OptionsTest(BaseCassEngTestCase): def test_all_size_tiered_options(self): class AllSizeTieredOptionsModel(Model): - __keyspace__ = 'test' + __compaction__ = SizeTieredCompactionStrategy __compaction_bucket_low__ = .3 __compaction_bucket_high__ = 2 @@ -220,7 +220,7 @@ class AllSizeTieredOptionsModel(Model): def 
test_all_leveled_options(self): class AllLeveledOptionsModel(Model): - __keyspace__ = 'test' + __compaction__ = LeveledCompactionStrategy __compaction_sstable_size_in_mb__ = 64 diff --git a/cqlengine/tests/management/test_management.py b/cqlengine/tests/management/test_management.py index 55258e52b9..5dae9ff18f 100644 --- a/cqlengine/tests/management/test_management.py +++ b/cqlengine/tests/management/test_management.py @@ -14,7 +14,7 @@ class CreateKeyspaceTest(BaseCassEngTestCase): def test_create_succeeeds(self): - management.create_keyspace('test_keyspace') + management.create_keyspace('test_keyspace', strategy_class="SimpleStrategy", replication_factor=1) management.delete_keyspace('test_keyspace') class DeleteTableTest(BaseCassEngTestCase): @@ -29,19 +29,19 @@ def test_multiple_deletes_dont_fail(self): drop_table(TestModel) class LowercaseKeyModel(Model): - __keyspace__ = 'test' + first_key = columns.Integer(primary_key=True) second_key = columns.Integer(primary_key=True) some_data = columns.Text() class CapitalizedKeyModel(Model): - __keyspace__ = 'test' + firstKey = columns.Integer(primary_key=True) secondKey = columns.Integer(primary_key=True) someData = columns.Text() class PrimaryKeysOnlyModel(Model): - __keyspace__ = 'test' + __compaction__ = LeveledCompactionStrategy first_ey = columns.Integer(primary_key=True) @@ -60,14 +60,14 @@ def test_table_definition(self): class FirstModel(Model): - __keyspace__ = 'test' + __table_name__ = 'first_model' first_key = columns.UUID(primary_key=True) second_key = columns.UUID() third_key = columns.Text() class SecondModel(Model): - __keyspace__ = 'test' + __table_name__ = 'first_model' first_key = columns.UUID(primary_key=True) second_key = columns.UUID() @@ -75,7 +75,7 @@ class SecondModel(Model): fourth_key = columns.Text() class ThirdModel(Model): - __keyspace__ = 'test' + __table_name__ = 'first_model' first_key = columns.UUID(primary_key=True) second_key = columns.UUID() @@ -84,7 +84,7 @@ class 
ThirdModel(Model): blah = columns.Map(columns.Text, columns.Text) class FourthModel(Model): - __keyspace__ = 'test' + __table_name__ = 'first_model' first_key = columns.UUID(primary_key=True) second_key = columns.UUID() @@ -118,7 +118,7 @@ def test_add_column(self): class ModelWithTableProperties(Model): - __keyspace__ = 'test' + # Set random table properties __bloom_filter_fp_chance__ = 0.76328 __caching__ = CACHING_ALL diff --git a/cqlengine/tests/model/test_class_construction.py b/cqlengine/tests/model/test_class_construction.py index 7ae758746b..7616d79c2a 100644 --- a/cqlengine/tests/model/test_class_construction.py +++ b/cqlengine/tests/model/test_class_construction.py @@ -20,7 +20,7 @@ def test_column_attributes_handled_correctly(self): """ class TestModel(Model): - __keyspace__ = 'test' + id = columns.UUID(primary_key=True, default=lambda:uuid4()) text = columns.Text() @@ -42,7 +42,7 @@ def test_db_map(self): -the db_map allows columns """ class WildDBNames(Model): - __keyspace__ = 'test' + id = columns.UUID(primary_key=True, default=lambda:uuid4()) content = columns.Text(db_field='words_and_whatnot') numbers = columns.Integer(db_field='integers_etc') @@ -67,7 +67,7 @@ def test_column_ordering_is_preserved(self): """ class Stuff(Model): - __keyspace__ = 'test' + id = columns.UUID(primary_key=True, default=lambda:uuid4()) words = columns.Text() content = columns.Text() @@ -78,7 +78,7 @@ class Stuff(Model): def test_exception_raised_when_creating_class_without_pk(self): with self.assertRaises(ModelDefinitionException): class TestModel(Model): - __keyspace__ = 'test' + count = columns.Integer() text = columns.Text(required=False) @@ -88,7 +88,7 @@ def test_value_managers_are_keeping_model_instances_isolated(self): Tests that instance value managers are isolated from other instances """ class Stuff(Model): - __keyspace__ = 'test' + id = columns.UUID(primary_key=True, default=lambda:uuid4()) num = columns.Integer() @@ -104,7 +104,7 @@ def 
test_superclass_fields_are_inherited(self): Tests that fields defined on the super class are inherited properly """ class TestModel(Model): - __keyspace__ = 'test' + id = columns.UUID(primary_key=True, default=lambda:uuid4()) text = columns.Text() @@ -117,7 +117,7 @@ class InheritedModel(TestModel): def test_column_family_name_generation(self): """ Tests that auto column family name generation works as expected """ class TestModel(Model): - __keyspace__ = 'test' + id = columns.UUID(primary_key=True, default=lambda:uuid4()) text = columns.Text() @@ -153,7 +153,7 @@ def test_partition_keys(self): Test compound partition key definition """ class ModelWithPartitionKeys(cqlengine.Model): - __keyspace__ = 'test' + id = columns.UUID(primary_key=True, default=lambda:uuid4()) c1 = cqlengine.Text(primary_key=True) p1 = cqlengine.Text(partition_key=True) @@ -175,7 +175,7 @@ class ModelWithPartitionKeys(cqlengine.Model): def test_del_attribute_is_assigned_properly(self): """ Tests that columns that can be deleted have the del attribute """ class DelModel(Model): - __keyspace__ = 'test' + id = columns.UUID(primary_key=True, default=lambda:uuid4()) key = columns.Integer(primary_key=True) data = columns.Integer(required=False) @@ -189,11 +189,11 @@ def test_does_not_exist_exceptions_are_not_shared_between_model(self): """ Tests that DoesNotExist exceptions are not the same exception between models """ class Model1(Model): - __keyspace__ = 'test' + id = columns.UUID(primary_key=True, default=lambda:uuid4()) class Model2(Model): - __keyspace__ = 'test' + id = columns.UUID(primary_key=True, default=lambda:uuid4()) try: @@ -207,7 +207,7 @@ class Model2(Model): def test_does_not_exist_inherits_from_superclass(self): """ Tests that a DoesNotExist exception can be caught by it's parent class DoesNotExist """ class Model1(Model): - __keyspace__ = 'test' + id = columns.UUID(primary_key=True, default=lambda:uuid4()) class Model2(Model1): @@ -244,14 +244,14 @@ def 
test_proper_table_naming(self): class AbstractModel(Model): __abstract__ = True - __keyspace__ = 'test' + class ConcreteModel(AbstractModel): pkey = columns.Integer(primary_key=True) data = columns.Integer() class AbstractModelWithCol(Model): - __keyspace__ = 'test' + __abstract__ = True pkey = columns.Integer(primary_key=True) @@ -260,7 +260,7 @@ class ConcreteModelWithCol(AbstractModelWithCol): class AbstractModelWithFullCols(Model): __abstract__ = True - __keyspace__ = 'test' + pkey = columns.Integer(primary_key=True) data = columns.Integer() @@ -333,7 +333,7 @@ def create(iself, **kwargs): class CQModel(Model): __queryset__ = QSet - __keyspace__ = 'test' + part = columns.UUID(primary_key=True) data = columns.Text() @@ -347,7 +347,7 @@ def save(iself): raise self.TestException class CDQModel(Model): - __keyspace__ = 'test' + __dmlquery__ = DMLQ part = columns.UUID(primary_key=True) data = columns.Text() diff --git a/cqlengine/tests/model/test_equality_operations.py b/cqlengine/tests/model/test_equality_operations.py index eeb8992b98..84855ad840 100644 --- a/cqlengine/tests/model/test_equality_operations.py +++ b/cqlengine/tests/model/test_equality_operations.py @@ -8,7 +8,7 @@ from cqlengine import columns class TestModel(Model): - __keyspace__ = 'test' + id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() text = columns.Text(required=False) diff --git a/cqlengine/tests/model/test_model.py b/cqlengine/tests/model/test_model.py index 9d4e0199ae..592ff4b86a 100644 --- a/cqlengine/tests/model/test_model.py +++ b/cqlengine/tests/model/test_model.py @@ -10,7 +10,7 @@ class TestModel(TestCase): def test_instance_equality(self): """ tests the model equality functionality """ class EqualityModel(Model): - __keyspace__ = 'test' + pk = columns.Integer(primary_key=True) m0 = EqualityModel(pk=0) @@ -22,11 +22,11 @@ class EqualityModel(Model): def test_model_equality(self): """ tests the model equality functionality """ class 
EqualityModel0(Model): - __keyspace__ = 'test' + pk = columns.Integer(primary_key=True) class EqualityModel1(Model): - __keyspace__ = 'test' + kk = columns.Integer(primary_key=True) m0 = EqualityModel0(pk=0) @@ -43,7 +43,7 @@ def test_model_with_attribute_name_conflict(self): """should raise exception when model defines column that conflicts with built-in attribute""" with self.assertRaises(ModelDefinitionException): class IllegalTimestampColumnModel(Model): - __keyspace__ = 'test' + my_primary_key = columns.Integer(primary_key=True) timestamp = columns.BigInt() @@ -51,7 +51,7 @@ def test_model_with_method_name_conflict(self): """should raise exception when model defines column that conflicts with built-in method""" with self.assertRaises(ModelDefinitionException): class IllegalFilterColumnModel(Model): - __keyspace__ = 'test' + my_primary_key = columns.Integer(primary_key=True) filter = columns.Text() diff --git a/cqlengine/tests/model/test_model_io.py b/cqlengine/tests/model/test_model_io.py index e19f7ad0b5..e15c5c43df 100644 --- a/cqlengine/tests/model/test_model_io.py +++ b/cqlengine/tests/model/test_model_io.py @@ -11,14 +11,14 @@ from cqlengine import columns class TestModel(Model): - __keyspace__ = 'test' + id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() text = columns.Text(required=False) a_bool = columns.Boolean(default=False) class TestModel(Model): - __keyspace__ = 'test' + id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() text = columns.Text(required=False) @@ -118,7 +118,7 @@ def test_a_sensical_error_is_raised_if_you_try_to_create_a_table_twice(self): class TestMultiKeyModel(Model): - __keyspace__ = 'test' + partition = columns.Integer(primary_key=True) cluster = columns.Integer(primary_key=True) count = columns.Integer(required=False) @@ -244,7 +244,7 @@ def test_success_case(self): class IndexDefinitionModel(Model): - __keyspace__ = 'test' + key = 
columns.UUID(primary_key=True) val = columns.Text(index=True) @@ -255,7 +255,7 @@ def test_exception_isnt_raised_if_an_index_is_defined_more_than_once(self): sync_table(IndexDefinitionModel) class ReservedWordModel(Model): - __keyspace__ = 'test' + token = columns.Text(primary_key=True) insert = columns.Integer(index=True) @@ -276,7 +276,7 @@ def test_reserved_cql_words_can_be_used_as_column_names(self): class TestQueryModel(Model): - __keyspace__ = 'test' + test_id = columns.UUID(primary_key=True, default=uuid4) date = columns.Date(primary_key=True) description = columns.Text() @@ -311,7 +311,7 @@ def test_query_with_date(self): def test_none_filter_fails(): class NoneFilterModel(Model): - __keyspace__ = 'test' + pk = columns.Integer(primary_key=True) v = columns.Integer() sync_table(NoneFilterModel) diff --git a/cqlengine/tests/model/test_polymorphism.py b/cqlengine/tests/model/test_polymorphism.py index 450a6ed4cc..426269095d 100644 --- a/cqlengine/tests/model/test_polymorphism.py +++ b/cqlengine/tests/model/test_polymorphism.py @@ -14,7 +14,7 @@ def test_multiple_polymorphic_key_failure(self): """ Tests that defining a model with more than one polymorphic key fails """ with self.assertRaises(models.ModelDefinitionException): class M(models.Model): - __keyspace__ = 'test' + partition = columns.Integer(primary_key=True) type1 = columns.Integer(polymorphic_key=True) type2 = columns.Integer(polymorphic_key=True) @@ -22,7 +22,7 @@ class M(models.Model): def test_polymorphic_key_inheritance(self): """ Tests that polymorphic_key attribute is not inherited """ class Base(models.Model): - __keyspace__ = 'test' + partition = columns.Integer(primary_key=True) type1 = columns.Integer(polymorphic_key=True) @@ -37,7 +37,7 @@ class M2(M1): def test_polymorphic_metaclass(self): """ Tests that the model meta class configures polymorphic models properly """ class Base(models.Model): - __keyspace__ = 'test' + partition = columns.Integer(primary_key=True) type1 = 
columns.Integer(polymorphic_key=True) @@ -58,7 +58,7 @@ class M1(Base): def test_table_names_are_inherited_from_poly_base(self): class Base(models.Model): - __keyspace__ = 'test' + partition = columns.Integer(primary_key=True) type1 = columns.Integer(polymorphic_key=True) @@ -70,13 +70,13 @@ class M1(Base): def test_collection_columns_cant_be_polymorphic_keys(self): with self.assertRaises(models.ModelDefinitionException): class Base(models.Model): - __keyspace__ = 'test' + partition = columns.Integer(primary_key=True) type1 = columns.Set(columns.Integer, polymorphic_key=True) class PolyBase(models.Model): - __keyspace__ = 'test' + partition = columns.UUID(primary_key=True, default=uuid.uuid4) row_type = columns.Integer(polymorphic_key=True) @@ -143,7 +143,7 @@ def test_delete_on_polymorphic_subclass_does_not_include_polymorphic_key(self): class UnindexedPolyBase(models.Model): - __keyspace__ = 'test' + partition = columns.UUID(primary_key=True, default=uuid.uuid4) cluster = columns.UUID(primary_key=True, default=uuid.uuid4) row_type = columns.Integer(polymorphic_key=True) @@ -201,7 +201,7 @@ def test_conflicting_type_results(self): class IndexedPolyBase(models.Model): - __keyspace__ = 'test' + partition = columns.UUID(primary_key=True, default=uuid.uuid4) cluster = columns.UUID(primary_key=True, default=uuid.uuid4) row_type = columns.Integer(polymorphic_key=True, index=True) diff --git a/cqlengine/tests/model/test_updates.py b/cqlengine/tests/model/test_updates.py index 4ffc817efd..77f64f7289 100644 --- a/cqlengine/tests/model/test_updates.py +++ b/cqlengine/tests/model/test_updates.py @@ -10,7 +10,7 @@ class TestUpdateModel(Model): - __keyspace__ = 'test' + partition = columns.UUID(primary_key=True, default=uuid4) cluster = columns.UUID(primary_key=True, default=uuid4) count = columns.Integer(required=False) diff --git a/cqlengine/tests/model/test_value_lists.py b/cqlengine/tests/model/test_value_lists.py index 306fa28d89..1b17d784df 100644 --- 
a/cqlengine/tests/model/test_value_lists.py +++ b/cqlengine/tests/model/test_value_lists.py @@ -8,12 +8,12 @@ class TestModel(Model): - __keyspace__ = 'test' + id = columns.Integer(primary_key=True) clustering_key = columns.Integer(primary_key=True, clustering_order='desc') class TestClusteringComplexModel(Model): - __keyspace__ = 'test' + id = columns.Integer(primary_key=True) clustering_key = columns.Integer(primary_key=True, clustering_order='desc') some_value = columns.Integer() diff --git a/cqlengine/tests/query/test_batch_query.py b/cqlengine/tests/query/test_batch_query.py index 74d2724a53..3875375fdf 100644 --- a/cqlengine/tests/query/test_batch_query.py +++ b/cqlengine/tests/query/test_batch_query.py @@ -12,14 +12,14 @@ class TestMultiKeyModel(Model): - __keyspace__ = 'test' + partition = columns.Integer(primary_key=True) cluster = columns.Integer(primary_key=True) count = columns.Integer(required=False) text = columns.Text(required=False) class BatchQueryLogModel(Model): - __keyspace__ = 'test' + # simple k/v table k = columns.Integer(primary_key=True) v = columns.Integer() @@ -170,7 +170,7 @@ def test_batch_execute_on_exception_skips_if_not_specified(self): pass obj = BatchQueryLogModel.objects(k=2) - + # should be 0 because the batch should not execute self.assertEqual(0, len(obj)) diff --git a/cqlengine/tests/query/test_datetime_queries.py b/cqlengine/tests/query/test_datetime_queries.py index 8179b0f290..704e55e388 100644 --- a/cqlengine/tests/query/test_datetime_queries.py +++ b/cqlengine/tests/query/test_datetime_queries.py @@ -11,7 +11,7 @@ from cqlengine import query class DateTimeQueryTestModel(Model): - __keyspace__ = 'test' + user = columns.Integer(primary_key=True) day = columns.DateTime(primary_key=True) data = columns.Text() diff --git a/cqlengine/tests/query/test_queryoperators.py b/cqlengine/tests/query/test_queryoperators.py index 5572d00c56..3facdd6828 100644 --- a/cqlengine/tests/query/test_queryoperators.py +++ 
b/cqlengine/tests/query/test_queryoperators.py @@ -39,7 +39,7 @@ def test_mintimeuuid_function(self): class TokenTestModel(Model): - __keyspace__ = 'test' + key = columns.Integer(primary_key=True) val = columns.Integer() @@ -75,7 +75,7 @@ def test_token_function(self): def test_compound_pk_token_function(self): class TestModel(Model): - __keyspace__ = 'test' + p1 = columns.Text(partition_key=True) p2 = columns.Text(partition_key=True) diff --git a/cqlengine/tests/query/test_queryset.py b/cqlengine/tests/query/test_queryset.py index 8383ccdf91..441d6e213c 100644 --- a/cqlengine/tests/query/test_queryset.py +++ b/cqlengine/tests/query/test_queryset.py @@ -43,7 +43,7 @@ def dst(self, dt): class TestModel(Model): - __keyspace__ = 'test' + test_id = columns.Integer(primary_key=True) attempt_id = columns.Integer(primary_key=True) description = columns.Text() @@ -52,7 +52,7 @@ class TestModel(Model): class IndexedTestModel(Model): - __keyspace__ = 'test' + test_id = columns.Integer(primary_key=True) attempt_id = columns.Integer(index=True) description = columns.Text() @@ -61,7 +61,7 @@ class IndexedTestModel(Model): class TestMultiClusteringModel(Model): - __keyspace__ = 'test' + one = columns.Integer(primary_key=True) two = columns.Integer(primary_key=True) three = columns.Integer(primary_key=True) @@ -380,7 +380,7 @@ def test_allow_filtering_flag(self): def test_non_quality_filtering(): class NonEqualityFilteringModel(Model): - __keyspace__ = 'test' + example_id = columns.UUID(primary_key=True, default=uuid.uuid4) sequence_id = columns.Integer(primary_key=True) # sequence_id is a clustering key example_type = columns.Integer(index=True) @@ -550,7 +550,7 @@ def test_conn_is_returned_after_filling_cache(self): class TimeUUIDQueryModel(Model): - __keyspace__ = 'test' + partition = columns.UUID(primary_key=True) time = columns.TimeUUID(primary_key=True) data = columns.Text(required=False) diff --git a/cqlengine/tests/query/test_updates.py 
b/cqlengine/tests/query/test_updates.py index 1037cf5a2a..97fb7e327d 100644 --- a/cqlengine/tests/query/test_updates.py +++ b/cqlengine/tests/query/test_updates.py @@ -9,7 +9,7 @@ class TestQueryUpdateModel(Model): - __keyspace__ = 'test' + partition = columns.UUID(primary_key=True, default=uuid4) cluster = columns.Integer(primary_key=True) count = columns.Integer(required=False) diff --git a/cqlengine/tests/test_batch_query.py b/cqlengine/tests/test_batch_query.py index 39204a594f..780dee53e2 100644 --- a/cqlengine/tests/test_batch_query.py +++ b/cqlengine/tests/test_batch_query.py @@ -11,7 +11,7 @@ from cqlengine.tests.base import BaseCassEngTestCase class TestMultiKeyModel(Model): - __keyspace__ = 'test' + partition = columns.Integer(primary_key=True) cluster = columns.Integer(primary_key=True) count = columns.Integer(required=False) diff --git a/cqlengine/tests/test_consistency.py b/cqlengine/tests/test_consistency.py index a45117b00f..7438fccf1b 100644 --- a/cqlengine/tests/test_consistency.py +++ b/cqlengine/tests/test_consistency.py @@ -7,7 +7,7 @@ from cqlengine import ALL, BatchQuery class TestConsistencyModel(Model): - __keyspace__ = 'test' + id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() text = columns.Text(required=False) diff --git a/cqlengine/tests/test_ifnotexists.py b/cqlengine/tests/test_ifnotexists.py index 6749ae8820..aa5ae5969e 100644 --- a/cqlengine/tests/test_ifnotexists.py +++ b/cqlengine/tests/test_ifnotexists.py @@ -11,8 +11,6 @@ class TestIfNotExistsModel(Model): - __keyspace__ = 'cqlengine_test_lwt' - id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() text = columns.Text(required=False) @@ -29,14 +27,12 @@ def setUpClass(cls): is 3 and one node only. Therefore I have create a new keyspace with replica_factor:1. 
""" - create_keyspace(TestIfNotExistsModel.__keyspace__, replication_factor=1) sync_table(TestIfNotExistsModel) @classmethod def tearDownClass(cls): super(BaseCassEngTestCase, cls).tearDownClass() drop_table(TestIfNotExistsModel) - delete_keyspace(TestIfNotExistsModel.__keyspace__) class IfNotExistsInsertTests(BaseIfNotExistsTest): @@ -139,7 +135,7 @@ def test_queryset_is_returned_on_class(self): self.assertTrue(isinstance(qs, TestIfNotExistsModel.__queryset__), type(qs)) def test_batch_if_not_exists(self): - """ ensure 'IF NOT EXISTS' exists in statement when in batch """ + """ ensure 'IF NOT EXISTS' exists in statement when in batch """ with mock.patch.object(self.session, 'execute') as m: with BatchQuery() as b: TestIfNotExistsModel.batch(b).if_not_exists().create(count=8) diff --git a/cqlengine/tests/test_load.py b/cqlengine/tests/test_load.py index 0db4243bf6..3e784ac8bd 100644 --- a/cqlengine/tests/test_load.py +++ b/cqlengine/tests/test_load.py @@ -8,7 +8,7 @@ import gc class LoadTest(Model): - __keyspace__ = 'test' + k = Integer(primary_key=True) v = Integer() diff --git a/cqlengine/tests/test_timestamp.py b/cqlengine/tests/test_timestamp.py index cd298cf457..b1a6a2860d 100644 --- a/cqlengine/tests/test_timestamp.py +++ b/cqlengine/tests/test_timestamp.py @@ -12,7 +12,6 @@ class TestTimestampModel(Model): - __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() diff --git a/cqlengine/tests/test_transaction.py b/cqlengine/tests/test_transaction.py index 6080a65d3a..eacb366016 100644 --- a/cqlengine/tests/test_transaction.py +++ b/cqlengine/tests/test_transaction.py @@ -15,7 +15,6 @@ class TestTransactionModel(Model): - __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() text = columns.Text(required=False) diff --git a/cqlengine/tests/test_ttl.py b/cqlengine/tests/test_ttl.py index 66fa8334ab..fc3cf57f20 100644 --- a/cqlengine/tests/test_ttl.py +++ 
b/cqlengine/tests/test_ttl.py @@ -8,7 +8,6 @@ class TestTTLModel(Model): - __keyspace__ = 'test' id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() text = columns.Text(required=False) @@ -28,7 +27,6 @@ def tearDownClass(cls): class TestDefaultTTLModel(Model): - __keyspace__ = 'test' __default_ttl__ = 20 id = columns.UUID(primary_key=True, default=lambda:uuid4()) count = columns.Integer() @@ -180,4 +178,4 @@ def test_override_default_ttl(self): TestDefaultTTLModel.objects(id=tid).ttl(None).update(text="aligators expired") query = m.call_args[0][0].query_string - self.assertNotIn("USING TTL", query) \ No newline at end of file + self.assertNotIn("USING TTL", query) From 1122018f4546c4a4de1b5f90593e2b0538eec6a6 Mon Sep 17 00:00:00 2001 From: Mission Liao Date: Thu, 15 Jan 2015 10:50:54 +0800 Subject: [PATCH 1135/3726] raise exception when if_not_exists is not capable. raise exception when calling if_not_exists for table with counter columns. refer to cqlengine#302 for details. 
--- cqlengine/exceptions.py | 1 + cqlengine/query.py | 4 +++- cqlengine/tests/test_ifnotexists.py | 34 +++++++++++++++++++++++++++-- 3 files changed, 36 insertions(+), 3 deletions(-) diff --git a/cqlengine/exceptions.py b/cqlengine/exceptions.py index f40f8af700..19b5c1fe37 100644 --- a/cqlengine/exceptions.py +++ b/cqlengine/exceptions.py @@ -5,3 +5,4 @@ class ValidationError(CQLEngineException): pass class UndefinedKeyspaceException(CQLEngineException): pass class LWTException(CQLEngineException): pass +class IfNotExistsWithCounterColumn(CQLEngineException): pass diff --git a/cqlengine/query.py b/cqlengine/query.py index 5f4c2cb593..b220ace10c 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -7,7 +7,7 @@ from .connection import execute, NOT_SET -from cqlengine.exceptions import CQLEngineException, ValidationError, LWTException +from cqlengine.exceptions import CQLEngineException, ValidationError, LWTException, IfNotExistsWithCounterColumn from cqlengine.functions import Token, BaseQueryFunction, QueryValue, UnicodeMixin #CQL 3 reference: @@ -784,6 +784,8 @@ def timestamp(self, timestamp): return clone def if_not_exists(self): + if self.model._has_counter: + raise IfNotExistsWithCounterColumn('if_not_exists cannot be used with tables containing columns') clone = copy.deepcopy(self) clone._if_not_exists = True return clone diff --git a/cqlengine/tests/test_ifnotexists.py b/cqlengine/tests/test_ifnotexists.py index aa5ae5969e..6b65dd2593 100644 --- a/cqlengine/tests/test_ifnotexists.py +++ b/cqlengine/tests/test_ifnotexists.py @@ -3,7 +3,7 @@ from cqlengine.tests.base import BaseCassEngTestCase from cqlengine.tests.base import PROTOCOL_VERSION from cqlengine.models import Model -from cqlengine.exceptions import LWTException +from cqlengine.exceptions import LWTException, IfNotExistsWithCounterColumn from cqlengine import columns, BatchQuery from uuid import uuid4 import mock @@ -16,6 +16,12 @@ class TestIfNotExistsModel(Model): text = 
columns.Text(required=False) +class TestIfNotExistsWithCounterModel(Model): + + id = columns.UUID(primary_key=True, default=lambda:uuid4()) + likes = columns.Counter() + + class BaseIfNotExistsTest(BaseCassEngTestCase): @classmethod @@ -31,10 +37,23 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - super(BaseCassEngTestCase, cls).tearDownClass() + super(BaseIfNotExistsTest, cls).tearDownClass() drop_table(TestIfNotExistsModel) +class BaseIfNotExistsWithCounterTest(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(BaseIfNotExistsWithCounterTest, cls).setUpClass() + sync_table(TestIfNotExistsWithCounterModel) + + @classmethod + def tearDownClass(cls): + super(BaseIfNotExistsWithCounterTest, cls).tearDownClass() + drop_table(TestIfNotExistsWithCounterModel) + + class IfNotExistsInsertTests(BaseIfNotExistsTest): @skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") @@ -170,3 +189,14 @@ def test_if_not_exists_is_not_include_with_query_on_update(self): self.assertNotIn("IF NOT EXIST", query) +class IfNotExistWithCounterTest(BaseIfNotExistsWithCounterTest): + + def test_instance_raise_exception(self): + """ make sure exception is raised when calling + if_not_exists on table with counter column + """ + id = uuid4() + self.assertRaises( + IfNotExistsWithCounterColumn, + TestIfNotExistsWithCounterModel.if_not_exists + ) From 00f322a20451a49ad389d53df7ea0f25e4ac2e75 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 14 Jan 2015 17:42:39 -0600 Subject: [PATCH 1136/3726] New OrderedMap type in support of nested collections PYTHON-186 Updated existing tests, still need to add tests for nested collections. 
--- cassandra/cqltypes.py | 6 +-- cassandra/encoder.py | 3 +- cassandra/util.py | 70 ++++++++++++++++++++++++++++++++++ tests/unit/test_marshalling.py | 13 +++---- 4 files changed, 81 insertions(+), 11 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 100c2968c9..ad65a4db5f 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -47,7 +47,7 @@ int32_pack, int32_unpack, int64_pack, int64_unpack, float_pack, float_unpack, double_pack, double_unpack, varint_pack, varint_unpack) -from cassandra.util import OrderedDict, sortedset +from cassandra.util import OrderedMap, sortedset apache_cassandra_type_prefix = 'org.apache.cassandra.db.marshal.' @@ -741,7 +741,7 @@ def deserialize_safe(cls, byts, protocol_version): length = 2 numelements = unpack(byts[:length]) p = length - themap = OrderedDict() + themap = OrderedMap() for _ in range(numelements): key_len = unpack(byts[p:p + length]) p += length @@ -753,7 +753,7 @@ def deserialize_safe(cls, byts, protocol_version): p += val_len key = subkeytype.from_binary(keybytes, protocol_version) val = subvaltype.from_binary(valbytes, protocol_version) - themap[key] = val + themap._insert(key, val) return themap @classmethod diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 8b1bb2fe73..9b478901fa 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -28,7 +28,7 @@ from uuid import UUID import six -from cassandra.util import OrderedDict, sortedset +from cassandra.util import OrderedDict, OrderedMap, sortedset if six.PY3: long = int @@ -76,6 +76,7 @@ def __init__(self): datetime.date: self.cql_encode_date, dict: self.cql_encode_map_collection, OrderedDict: self.cql_encode_map_collection, + OrderedMap: self.cql_encode_map_collection, list: self.cql_encode_list_collection, tuple: self.cql_encode_list_collection, set: self.cql_encode_set_collection, diff --git a/cassandra/util.py b/cassandra/util.py index d02219ffdc..87f520eb57 100644 --- a/cassandra/util.py +++ 
b/cassandra/util.py @@ -555,3 +555,73 @@ def _intersect(self, other): if item in other: isect.add(item) return isect + +from collections import Mapping +import six +from six.moves import cPickle + +class OrderedMap(Mapping): + ''' + An ordered map that accepts non-hashable types for keys. + + Implemented in support of Cassandra nested collections. + + ''' + def __init__(self, *args, **kwargs): + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + + self._items = [] + self._index = {} + if args: + e = args[0] + if callable(getattr(e, 'keys', None)): + for k in e.keys(): + self._items.append((k, e[k])) + else: + for k, v in e: + self._insert(k, v) + + for k, v in six.iteritems(kwargs): + self._insert(k, v) + + def _insert(self, key, value): + flat_key = self._serialize_key(key) + i = self._index.get(flat_key, -1) + if i >= 0: + self._items[i] = (key, value) + else: + self._items.append((key, value)) + self._index[flat_key] = len(self._items) - 1 + + def __getitem__(self, key): + index = self._index[self._serialize_key(key)] + return self._items[index][1] + + def __iter__(self): + for i in self._items: + yield i[0] + + def __len__(self): + return len(self._items) + + def __eq__(self, other): + if isinstance(other, OrderedMap): + return self._items == other._items + try: + d = dict(other) + return len(d) == len(self._items) and all(i[1] == d[i[0]] for i in self._items) + except KeyError: + return False + except TypeError: + pass + return NotImplemented + + def __repr__(self): + return '%s[%s]' % ( + self.__class__.__name__, + ', '.join("(%r, %r)" % (k, v) for k, v in self._items)) + + @staticmethod + def _serialize_key(key): + return cPickle.dumps(key) diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index 05239b4798..54686e24c4 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -24,7 +24,7 @@ from uuid import UUID from cassandra.cqltypes import lookup_casstype -from 
cassandra.util import OrderedDict, sortedset +from cassandra.util import OrderedMap, sortedset marshalled_value_pairs = ( # binary form, type, python native type @@ -75,21 +75,20 @@ (b'', 'MapType(AsciiType, BooleanType)', None), (b'', 'ListType(FloatType)', None), (b'', 'SetType(LongType)', None), - (b'\x00\x00', 'MapType(DecimalType, BooleanType)', OrderedDict()), + (b'\x00\x00', 'MapType(DecimalType, BooleanType)', OrderedMap()), (b'\x00\x00', 'ListType(FloatType)', []), (b'\x00\x00', 'SetType(IntegerType)', sortedset()), (b'\x00\x01\x00\x10\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0', 'ListType(TimeUUIDType)', [UUID(bytes=b'\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0')]), ) -ordered_dict_value = OrderedDict() -ordered_dict_value[u'\u307fbob'] = 199 -ordered_dict_value[u''] = -1 -ordered_dict_value[u'\\'] = 0 +ordered_map_value = OrderedMap([(u'\u307fbob', 199), + (u'', -1), + (u'\\', 0)]) # these following entries work for me right now, but they're dependent on # vagaries of internal python ordering for unordered types marshalled_value_pairs_unsafe = ( - (b'\x00\x03\x00\x06\xe3\x81\xbfbob\x00\x04\x00\x00\x00\xc7\x00\x00\x00\x04\xff\xff\xff\xff\x00\x01\\\x00\x04\x00\x00\x00\x00', 'MapType(UTF8Type, Int32Type)', ordered_dict_value), + (b'\x00\x03\x00\x06\xe3\x81\xbfbob\x00\x04\x00\x00\x00\xc7\x00\x00\x00\x04\xff\xff\xff\xff\x00\x01\\\x00\x04\x00\x00\x00\x00', 'MapType(UTF8Type, Int32Type)', ordered_map_value), (b'\x00\x02\x00\x08@\x01\x99\x99\x99\x99\x99\x9a\x00\x08@\x14\x00\x00\x00\x00\x00\x00', 'SetType(DoubleType)', sortedset([2.2, 5.0])), (b'\x00', 'IntegerType', 0), ) From e824331d9b5aaed2b44b37329b41c0f8177bef2e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 15 Jan 2015 09:52:07 -0600 Subject: [PATCH 1137/3726] Unit test for ordered map. 
PYTHON-186 --- tests/unit/test_orderedmap.py | 127 ++++++++++++++++++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 tests/unit/test_orderedmap.py diff --git a/tests/unit/test_orderedmap.py b/tests/unit/test_orderedmap.py new file mode 100644 index 0000000000..3cee3a11c5 --- /dev/null +++ b/tests/unit/test_orderedmap.py @@ -0,0 +1,127 @@ +# Copyright 2013-2014 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from cassandra.util import OrderedMap +from cassandra.cqltypes import EMPTY +import six + +class OrderedMapTest(unittest.TestCase): + def test_init(self): + a = OrderedMap(zip(['one', 'three', 'two'], [1, 3, 2])) + b = OrderedMap([('one', 1), ('three', 3), ('two', 2)]) + c = OrderedMap(a) + builtin = {'one': 1, 'two': 2, 'three': 3} + self.assertEqual(a, b) + self.assertEqual(a, c) + self.assertEqual(a, builtin) + self.assertEqual(OrderedMap([(1, 1), (1, 2)]), {1: 2}) + + def test_contains(self): + keys = ['first', 'middle', 'last'] + + od = OrderedMap() + + od = OrderedMap(zip(keys, range(len(keys)))) + + for k in keys: + self.assertTrue(k in od) + self.assertFalse(k not in od) + + self.assertTrue('notthere' not in od) + self.assertFalse('notthere' in od) + + def test_keys(self): + keys = ['first', 'middle', 'last'] + od = OrderedMap(zip(keys, range(len(keys)))) + + self.assertListEqual(list(od.keys()), keys) + + def test_values(self): + keys 
= ['first', 'middle', 'last'] + values = list(range(len(keys))) + od = OrderedMap(zip(keys, values)) + + self.assertListEqual(list(od.values()), values) + + def test_items(self): + keys = ['first', 'middle', 'last'] + items = list(zip(keys, range(len(keys)))) + od = OrderedMap(items) + + self.assertListEqual(list(od.items()), items) + + def test_get(self): + keys = ['first', 'middle', 'last'] + od = OrderedMap(zip(keys, range(len(keys)))) + + for v, k in enumerate(keys): + self.assertEqual(od.get(k), v) + + self.assertEqual(od.get('notthere', 'default'), 'default') + self.assertIsNone(od.get('notthere')) + + def test_equal(self): + d1 = {'one': 1} + d12 = {'one': 1, 'two': 2} + od1 = OrderedMap({'one': 1}) + od12 = OrderedMap([('one', 1), ('two', 2)]) + od21 = OrderedMap([('two', 2), ('one', 1)]) + + self.assertEqual(od1, d1) + self.assertEqual(od12, d12) + self.assertEqual(od21, d12) + self.assertNotEqual(od1, od12) + self.assertNotEqual(od12, od1) + self.assertNotEqual(od12, od21) + self.assertNotEqual(od1, d12) + self.assertNotEqual(od12, d1) + self.assertNotEqual(od1, EMPTY) + + def test_getitem(self): + keys = ['first', 'middle', 'last'] + od = OrderedMap(zip(keys, range(len(keys)))) + + for v, k in enumerate(keys): + self.assertEqual(od[k], v) + + with self.assertRaises(KeyError): + od['notthere'] + + def test_iter(self): + keys = ['first', 'middle', 'last'] + values = list(range(len(keys))) + items = list(zip(keys, values)) + od = OrderedMap(items) + + itr = iter(od) + self.assertEqual(sum([1 for _ in itr]), len(keys)) + self.assertRaises(StopIteration, six.next, itr) + + self.assertEqual(list(iter(od)), keys) + self.assertEqual(list(six.iteritems(od)), items) + self.assertEqual(list(six.itervalues(od)), values) + + def test_len(self): + self.assertEqual(len(OrderedMap()), 0) + self.assertEqual(len(OrderedMap([(1, 1)])), 1) + + def test_mutable_keys(self): + d = {'1': 1} + s = set([1, 2, 3]) + od = OrderedMap([(d, 'dict'), (s, 'set')]) From 
0a51e23aa8ddcba8d8b9ffdc1ea9f35adb6b6ce6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 15 Jan 2015 11:19:08 -0600 Subject: [PATCH 1138/3726] Update existing integration test for new map type. --- cassandra/util.py | 3 +++ tests/integration/standard/test_types.py | 6 +++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/cassandra/util.py b/cassandra/util.py index 87f520eb57..aa06d6d435 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -622,6 +622,9 @@ def __repr__(self): self.__class__.__name__, ', '.join("(%r, %r)" % (k, v) for k, v in self._items)) + def __str__(self): + return '{%s}' % ', '.join("%r: %r" % (k, v) for k, v in self._items) + @staticmethod def _serialize_key(key): return cPickle.dumps(key) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index fb985fa7dc..b0a47c06eb 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -30,7 +30,7 @@ from cassandra.cluster import Cluster from cassandra.cqltypes import Int32Type, EMPTY from cassandra.query import dict_factory -from cassandra.util import OrderedDict, sortedset +from cassandra.util import OrderedMap, sortedset from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION @@ -279,11 +279,11 @@ def test_empty_strings_and_nones(self): s.execute("INSERT INTO mytable (a, b, c, o, s, l, n) VALUES ('a', 'b', %s, %s, %s, %s, %s)", ('', '', '', [''], {'': 3})) self.assertEqual( - {'c': '', 'o': '', 's': '', 'l': [''], 'n': OrderedDict({'': 3})}, + {'c': '', 'o': '', 's': '', 'l': [''], 'n': OrderedMap({'': 3})}, s.execute("SELECT c, o, s, l, n FROM mytable WHERE a='a' AND b='b'")[0]) self.assertEqual( - {'c': '', 'o': '', 's': '', 'l': [''], 'n': OrderedDict({'': 3})}, + {'c': '', 'o': '', 's': '', 'l': [''], 'n': OrderedMap({'': 3})}, s.execute(s.prepare("SELECT c, o, s, l, n FROM mytable WHERE a='a' AND b='b'"), [])[0]) # non-string types shouldn't accept empty 
strings From 0c40ccf1cfb21917e43065939c3df842b727d8f4 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 15 Jan 2015 14:45:23 -0800 Subject: [PATCH 1139/3726] version bump --- cqlengine/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/VERSION b/cqlengine/VERSION index 5a03fb737b..885415662f 100644 --- a/cqlengine/VERSION +++ b/cqlengine/VERSION @@ -1 +1 @@ -0.20.0 +0.21.0 From ab61fe9ecb026e42a9ab4be5a0b25b8cc3dc901e Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Thu, 15 Jan 2015 15:02:01 -0800 Subject: [PATCH 1140/3726] updated changelog --- changelog | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/changelog b/changelog index 88a4894fdf..efc6cb67f8 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,17 @@ CHANGELOG +0.21.0 + +Create keyspace no longer defaults to SimpleStrategy with 3 replicas. It must be manually specified. +sync_table() no longer has an option to create keyspaces automatically. +cqlengine_test is now the default keyspace used for tests and is explicitly created +all references hardcoded to the "test" keyspace have been removed and use the default instead +instead of testing with bin/nose, use setup_package +improved validation error messages to include column name +fixed updating empty sets +improvements to puppet download script +per query timeouts now supported, see https://cqlengine.readthedocs.org/en/latest/topics/queryset.html?highlight=timeout#per-query-timeouts + 0.19.0 Fixed tests with Cassandra version 1.2 and 2.1 From 56638346b8759a35b55c7c819e4e41c21d65ca9f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 15 Jan 2015 19:40:45 -0600 Subject: [PATCH 1141/3726] Python 3 syntax --- tests/integration/standard/test_authentication.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 08832ecd0f..fdb338bf40 100644 --- 
a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -59,7 +59,7 @@ def get_authentication_provider(self, username, password): :return: authentication object suitable for Cluster.connect() """ if PROTOCOL_VERSION < 2: - return lambda(hostname): dict(username=username, password=password) + return lambda hostname: dict(username=username, password=password) else: return PlainTextAuthProvider(username=username, password=password) From de8ae577a22643d429a2a129601ec323ca434cb3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 15 Jan 2015 20:19:14 -0600 Subject: [PATCH 1142/3726] Updates to 'date' and 'time' type handling. datetime.date --> 'date' datetime.time --> 'time' datetime.time seemed to be the best native type for time, but it doesn't have nanosecond resolution. Clients must use int types for that. --- cassandra/cqltypes.py | 144 ++++++++++++----------- cassandra/encoder.py | 12 +- tests/integration/standard/test_types.py | 16 ++- tests/unit/test_marshalling.py | 7 +- tests/unit/test_types.py | 105 ++++++++++------- 5 files changed, 159 insertions(+), 125 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 9d375d4a02..abcb4a1029 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -36,7 +36,7 @@ import re import socket import time -from datetime import datetime, timedelta +import datetime from uuid import UUID import warnings @@ -55,8 +55,8 @@ if six.PY3: _number_types = frozenset((int, float)) - _time_types = frozenset((int)) - _date_types = frozenset((int)) + _time_types = frozenset((int,)) + _date_types = frozenset((int,)) long = int else: _number_types = frozenset((int, long, float)) @@ -74,6 +74,15 @@ def unix_time_from_uuid1(u): return (u.time - 0x01B21DD213814000) / 10000000.0 +def datetime_from_timestamp(timestamp): + if timestamp >= 0: + dt = datetime.datetime.utcfromtimestamp(timestamp) + else: + # PYTHON-119: workaround for Windows + dt = 
datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp) + return dt + + _casstypes = {} @@ -543,26 +552,26 @@ class DateType(_CassandraType): typename = 'timestamp' @classmethod - def validate(cls, date): - if isinstance(date, six.string_types): - date = cls.interpret_datestring(date) - return date + def validate(cls, val): + if isinstance(val, six.string_types): + val = cls.interpret_datestring(val) + return val @staticmethod - def interpret_datestring(date): - if date[-5] in ('+', '-'): - offset = (int(date[-4:-2]) * 3600 + int(date[-2:]) * 60) * int(date[-5] + '1') - date = date[:-5] + def interpret_datestring(val): + if val[-5] in ('+', '-'): + offset = (int(val[-4:-2]) * 3600 + int(val[-2:]) * 60) * int(val[-5] + '1') + val = val[:-5] else: offset = -time.timezone for tformat in cql_timestamp_formats: try: - tval = time.strptime(date, tformat) + tval = time.strptime(val, tformat) except ValueError: continue return calendar.timegm(tval) + offset else: - raise ValueError("can't interpret %r as a date" % (date,)) + raise ValueError("can't interpret %r as a date" % (val,)) def my_timestamp(self): return self.val @@ -570,12 +579,7 @@ def my_timestamp(self): @staticmethod def deserialize(byts, protocol_version): timestamp = int64_unpack(byts) / 1000.0 - if timestamp >= 0: - dt = datetime.utcfromtimestamp(timestamp) - else: - # PYTHON-119: workaround for Windows - dt = datetime(1970, 1, 1) + timedelta(seconds=timestamp) - return dt + return datetime_from_timestamp(timestamp) @staticmethod def serialize(v, protocol_version): @@ -635,85 +639,87 @@ class SimpleDateType(_CassandraType): date_format = "%Y-%m-%d" @classmethod - def validate(cls, date): - if isinstance(date, basestring): - date = cls.interpret_simpledate_string(date) - return date + def validate(cls, val): + if isinstance(val, six.string_types): + val = cls.interpret_simpledate_string(val) + elif (not isinstance(val, datetime.date)) and (type(val) not in _date_types): + raise 
TypeError('SimpleDateType arg must be a datetime.date, unsigned integer, or string in the format YYYY-MM-DD') + return val @staticmethod def interpret_simpledate_string(v): - try: - tval = time.strptime(v, SimpleDateType.date_format) - # shift upward w/epoch at 2**31 - return (calendar.timegm(tval) / SimpleDateType.seconds_per_day) + 2**31 - except TypeError: - # Ints are valid dates too - if type(v) not in _date_types: - raise TypeError('Date arguments must be an unsigned integer or string in the format YYYY-MM-DD') - return v + date_time = datetime.datetime.strptime(v, SimpleDateType.date_format) + return datetime.date(date_time.year, date_time.month, date_time.day) @staticmethod def serialize(val, protocol_version): - date_val = SimpleDateType.interpret_simpledate_string(val) - return uint32_pack(date_val) + # Values of the 'date'` type are encoded as 32-bit unsigned integers + # representing a number of days with "the epoch" at the center of the + # range (2^31). Epoch is January 1st, 1970 + try: + shifted = (calendar.timegm(val.timetuple()) // SimpleDateType.seconds_per_day) + 2 ** 31 + except AttributeError: + shifted = val + return uint32_pack(shifted) @staticmethod def deserialize(byts, protocol_version): - Result = namedtuple('SimpleDate', 'value') - return Result(value=uint32_unpack(byts)) + timestamp = SimpleDateType.seconds_per_day * (uint32_unpack(byts) - 2 ** 31) + dt = datetime.datetime.utcfromtimestamp(timestamp) + return datetime.date(dt.year, dt.month, dt.day) class TimeType(_CassandraType): typename = 'time' - ONE_MICRO=1000 - ONE_MILLI=1000*ONE_MICRO - ONE_SECOND=1000*ONE_MILLI - ONE_MINUTE=60*ONE_SECOND - ONE_HOUR=60*ONE_MINUTE + ONE_MICRO = 1000 + ONE_MILLI = 1000 * ONE_MICRO + ONE_SECOND = 1000 * ONE_MILLI + ONE_MINUTE = 60 * ONE_SECOND + ONE_HOUR = 60 * ONE_MINUTE @classmethod def validate(cls, val): - if isinstance(val, basestring): - time = cls.interpret_timestring(val) - return time + if isinstance(val, six.string_types): + val = 
cls.interpret_timestring(val) + elif (not isinstance(val, datetime.time)) and (type(val) not in _time_types): + raise TypeError('TimeType arguments must be a string or whole number') + return val @staticmethod def interpret_timestring(val): try: nano = 0 - try: - base_time_str = val - if '.' in base_time_str: - base_time_str = val[0:val.find('.')] - base_time = time.strptime(base_time_str, "%H:%M:%S") - nano = base_time.tm_hour * TimeType.ONE_HOUR - nano += base_time.tm_min * TimeType.ONE_MINUTE - nano += base_time.tm_sec * TimeType.ONE_SECOND - - if '.' in val: - nano_time_str = val[val.find('.')+1:] - # right pad to 9 digits - while len(nano_time_str) < 9: - nano_time_str += "0" - nano += int(nano_time_str) - - except AttributeError as e: - if type(val) not in _time_types: - raise TypeError('TimeType arguments must be a string or whole number') - # long / int values passed in are acceptable too - nano = val + parts = val.split('.') + base_time = time.strptime(parts[0], "%H:%M:%S") + nano = (base_time.tm_hour * TimeType.ONE_HOUR + + base_time.tm_min * TimeType.ONE_MINUTE + + base_time.tm_sec * TimeType.ONE_SECOND) + + if len(parts) > 1: + # right pad to 9 digits + nano_time_str = parts[1] + "0" * (9 - len(parts[1])) + nano += int(nano_time_str) + return nano - except ValueError as e: + except ValueError: raise ValueError("can't interpret %r as a time" % (val,)) @staticmethod def serialize(val, protocol_version): - return int64_pack(TimeType.interpret_timestring(val)) + # Values of the @time@ type are encoded as 64-bit signed integers + # representing the number of nanoseconds since midnight. 
+ try: + nano = (val.hour * TimeType.ONE_HOUR + + val.minute * TimeType.ONE_MINUTE + + val.second * TimeType.ONE_SECOND + + val.microsecond * TimeType.ONE_MICRO) + except AttributeError: + nano = val + return int64_pack(nano) @staticmethod def deserialize(byts, protocol_version): - Result = namedtuple('Time', 'value') - return Result(value=int64_unpack(byts)) + return int64_unpack(byts) class UTF8Type(_CassandraType): diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 8b1bb2fe73..2f3cfa921b 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -74,6 +74,7 @@ def __init__(self): UUID: self.cql_encode_object, datetime.datetime: self.cql_encode_datetime, datetime.date: self.cql_encode_date, + datetime.time: self.cql_encode_time, dict: self.cql_encode_map_collection, OrderedDict: self.cql_encode_map_collection, list: self.cql_encode_list_collection, @@ -146,9 +147,16 @@ def cql_encode_datetime(self, val): def cql_encode_date(self, val): """ Converts a :class:`datetime.date` object to a string with format - ``YYYY-MM-DD-0000``. + ``YYYY-MM-DD``. """ - return "'%s'" % val.strftime('%Y-%m-%d-0000') + return "'%s'" % val.strftime('%Y-%m-%d') + + def cql_encode_time(self, val): + """ + Converts a :class:`datetime.date` object to a string with format + ``HH:MM:SS.mmmuuunnn``. 
+ """ + return "'%s'" % val def cql_encode_sequence(self, val): """ diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 44978e67dc..4944725ec6 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -22,7 +22,7 @@ log = logging.getLogger(__name__) from decimal import Decimal -from datetime import datetime +from datetime import datetime, date, time import six from uuid import uuid1, uuid4 @@ -31,7 +31,6 @@ from cassandra.cqltypes import Int32Type, EMPTY from cassandra.query import dict_factory from cassandra.util import OrderedDict, sortedset -from collections import namedtuple from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION @@ -171,6 +170,8 @@ def test_basic_types(self): v1_uuid = uuid1() v4_uuid = uuid4() mydatetime = datetime(2013, 12, 31, 23, 59, 59, 999000) + mydate = date(2015, 1, 15) + mytime = time(16, 47, 25, 7) params = [ "sometext", @@ -192,13 +193,10 @@ def test_basic_types(self): v1_uuid, # timeuuid u"sometext\u1234", # varchar 123456789123456789123456789, # varint - '2014-01-01', # date - '01:02:03.456789012' # time + mydate, # date + mytime ] - SimpleDate = namedtuple('SimpleDate', 'value') - Time = namedtuple('Time', 'value') - expected_vals = ( "sometext", "sometext", @@ -219,8 +217,8 @@ def test_basic_types(self): v1_uuid, # timeuuid u"sometext\u1234", # varchar 123456789123456789123456789, # varint - SimpleDate(2147499719), # date - Time(3723456789012) # time + mydate, # date + 60445000007000 # time ) s.execute(""" diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index 9eb4de695a..a329b4f5ef 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -19,7 +19,7 @@ import unittest # noqa import platform -from datetime import datetime +from datetime import datetime, date from decimal import Decimal from uuid import UUID @@ -79,8 +79,9 @@ (b'\x00\x00', 
'ListType(FloatType)', []), (b'\x00\x00', 'SetType(IntegerType)', sortedset()), (b'\x00\x01\x00\x10\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0', 'ListType(TimeUUIDType)', [UUID(bytes=b'\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0')]), - (b'\x00\x00>\xc7', 'SimpleDateType', '2014-01-01'), - (b'\x00\x00\x00\x00\x00\x00\x00\x01', 'TimeType', '00:00:00.000000001') + (b'\x80\x00\x00\x01', 'SimpleDateType', date(1970,1,2)), + (b'\x7f\xff\xff\xff', 'SimpleDateType', date(1969,12,31)), + (b'\x00\x00\x00\x00\x00\x00\x00\x01', 'TimeType', 1) ) ordered_dict_value = OrderedDict() diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 28f7b63ade..42e2c9922f 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -128,56 +128,77 @@ def test_simpledate(self): """ Test cassandra.cqltypes.SimpleDateType() construction """ + # from string + expected_date = datetime.date(1492, 10, 12) + sd = SimpleDateType('1492-10-12') + self.assertEqual(sd.val, expected_date) - nd = SimpleDateType.interpret_simpledate_string('2014-01-01') - tval = time.strptime('2014-01-01', SimpleDateType.date_format) - manual = calendar.timegm(tval) / SimpleDateType.seconds_per_day - self.assertEqual(nd, manual) + # date + sd = SimpleDateType(expected_date) + self.assertEqual(sd.val, expected_date) - nd = SimpleDateType.interpret_simpledate_string('1970-01-01') - self.assertEqual(nd, 0) + # int + expected_timestamp = calendar.timegm(expected_date.timetuple()) + sd = SimpleDateType(expected_timestamp) + self.assertEqual(sd.val, expected_timestamp) + + # no contruct + self.assertRaises(ValueError, SimpleDateType, '1999-10-10-bad-time') + self.assertRaises(TypeError, SimpleDateType, 1.234) def test_time(self): """ Test cassandra.cqltypes.TimeType() construction """ - one_micro = 1000 - one_milli = 1000L*one_micro - one_second = 1000L*one_milli - one_minute = 60L*one_second - one_hour = 60L*one_minute - - nd = TimeType.interpret_timestring('00:00:00.000000001') - self.assertEqual(nd, 1) 
- nd = TimeType.interpret_timestring('00:00:00.000001') - self.assertEqual(nd, one_micro) - nd = TimeType.interpret_timestring('00:00:00.001') - self.assertEqual(nd, one_milli) - nd = TimeType.interpret_timestring('00:00:01') - self.assertEqual(nd, one_second) - nd = TimeType.interpret_timestring('00:01:00') - self.assertEqual(nd, one_minute) - nd = TimeType.interpret_timestring('01:00:00') - self.assertEqual(nd, one_hour) - - nd = TimeType('23:59:59.1') - nd = TimeType('23:59:59.12') - nd = TimeType('23:59:59.123') - nd = TimeType('23:59:59.1234') - nd = TimeType('23:59:59.12345') - - nd = TimeType.interpret_timestring('23:59:59.123456') - self.assertEquals(nd, 23*one_hour + 59*one_minute + 59*one_second + 123*one_milli + 456*one_micro) - - nd = TimeType.interpret_timestring('23:59:59.1234567') - self.assertEquals(nd, 23*one_hour + 59*one_minute + 59*one_second + 123*one_milli + 456*one_micro + 700) - - nd = TimeType.interpret_timestring('23:59:59.12345678') - self.assertEquals(nd, 23*one_hour + 59*one_minute + 59*one_second + 123*one_milli + 456*one_micro + 780) - - nd = TimeType.interpret_timestring('23:59:59.123456789') - self.assertEquals(nd, 23*one_hour + 59*one_minute + 59*one_second + 123*one_milli + 456*one_micro + 789) + one_milli = 1000 * one_micro + one_second = 1000 * one_milli + one_minute = 60 * one_second + one_hour = 60 * one_minute + + # from strings + tt = TimeType('00:00:00.000000001') + self.assertEqual(tt.val, 1) + tt = TimeType('00:00:00.000001') + self.assertEqual(tt.val, one_micro) + tt = TimeType('00:00:00.001') + self.assertEqual(tt.val, one_milli) + tt = TimeType('00:00:01') + self.assertEqual(tt.val, one_second) + tt = TimeType('00:01:00') + self.assertEqual(tt.val, one_minute) + tt = TimeType('01:00:00') + self.assertEqual(tt.val, one_hour) + tt = TimeType('01:00:00.') + self.assertEqual(tt.val, one_hour) + + tt = TimeType('23:59:59.1') + tt = TimeType('23:59:59.12') + tt = TimeType('23:59:59.123') + tt = TimeType('23:59:59.1234') + tt 
= TimeType('23:59:59.12345') + + tt = TimeType('23:59:59.123456') + self.assertEqual(tt.val, 23*one_hour + 59*one_minute + 59*one_second + 123*one_milli + 456*one_micro) + + tt = TimeType('23:59:59.1234567') + self.assertEqual(tt.val, 23*one_hour + 59*one_minute + 59*one_second + 123*one_milli + 456*one_micro + 700) + + tt = TimeType('23:59:59.12345678') + self.assertEqual(tt.val, 23*one_hour + 59*one_minute + 59*one_second + 123*one_milli + 456*one_micro + 780) + + tt = TimeType('23:59:59.123456789') + self.assertEqual(tt.val, 23*one_hour + 59*one_minute + 59*one_second + 123*one_milli + 456*one_micro + 789) + + # from int + tt = TimeType(12345678) + self.assertEqual(tt.val, 12345678) + + # no construct + self.assertRaises(ValueError, TimeType, '1999-10-10 11:11:11.1234') + self.assertRaises(TypeError, TimeType, 1.234) + self.assertRaises(TypeError, TimeType, datetime.datetime(2004, 12, 23, 11, 11, 1)) + def test_cql_typename(self): """ From 7fb59a6f08e8f0e0e0dfcc4429264bd6ee4aa598 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 15 Jan 2015 20:49:14 -0600 Subject: [PATCH 1143/3726] Scale interpreted timestamp for DateType --- cassandra/cqltypes.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 66c3df6603..0ce235319d 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -556,7 +556,8 @@ def interpret_datestring(date): tval = time.strptime(date, tformat) except ValueError: continue - return calendar.timegm(tval) + offset + # scale seconds to millis for the raw value + return (calendar.timegm(tval) + offset) * 1e3 else: raise ValueError("can't interpret %r as a date" % (date,)) From fbeecbb1ebb7da04388aaf109062d145aca3dc5f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 15 Jan 2015 21:07:09 -0600 Subject: [PATCH 1144/3726] Expanded desc for OrderedMap, updated __str__ code review input --- cassandra/util.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) 
diff --git a/cassandra/util.py b/cassandra/util.py index aa06d6d435..8400b6046e 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -560,12 +560,14 @@ def _intersect(self, other): import six from six.moves import cPickle + class OrderedMap(Mapping): ''' An ordered map that accepts non-hashable types for keys. - Implemented in support of Cassandra nested collections. - + Implemented in support of Cassandra nested collections. This class dervies from + the (immutable) Mapping API. Although clients may obtain references, keys in + the map should not be modified. ''' def __init__(self, *args, **kwargs): if len(args) > 1: @@ -623,7 +625,7 @@ def __repr__(self): ', '.join("(%r, %r)" % (k, v) for k, v in self._items)) def __str__(self): - return '{%s}' % ', '.join("%r: %r" % (k, v) for k, v in self._items) + return '{%s}' % ', '.join("%s: %s" % (k, v) for k, v in self._items) @staticmethod def _serialize_key(key): From 1988a487a5b155019afc2f0ac18c39a2d130a8ab Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 16 Jan 2015 08:40:16 -0600 Subject: [PATCH 1145/3726] Add OrderedMap to API docs --- cassandra/util.py | 27 +++++++++++++++++++++++---- docs/api/cassandra/util.rst | 7 +++++++ docs/api/index.rst | 1 + 3 files changed, 31 insertions(+), 4 deletions(-) create mode 100644 docs/api/cassandra/util.rst diff --git a/cassandra/util.py b/cassandra/util.py index 8400b6046e..1ff1fe75a8 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -563,11 +563,30 @@ def _intersect(self, other): class OrderedMap(Mapping): ''' - An ordered map that accepts non-hashable types for keys. + An ordered map that accepts non-hashable types for keys. It also maintains the + insertion order of items, behaving as OrderedDict in that regard. These maps + are constructed and read just as normal mapping types, exept that they may + contain arbitrary collections and other non-hashable items as keys:: + + >>> od = OrderedMap([({'one': 1, 'two': 2}, 'value'), + ... 
({'three': 3, 'four': 4}, 'value2')]) + >>> list(od.keys()) + [{'two': 2, 'one': 1}, {'three': 3, 'four': 4}] + >>> list(od.values()) + ['value', 'value2'] + + These constructs are needed to support nested collections in Cassandra 2.1.3+, + where frozen collections can be specified as parameters to others:: + + CREATE TABLE example ( + ... + value map>, double> + ... + ) + + This class dervies from the (immutable) Mapping API. Objects in these maps + are not intended be modified. - Implemented in support of Cassandra nested collections. This class dervies from - the (immutable) Mapping API. Although clients may obtain references, keys in - the map should not be modified. ''' def __init__(self, *args, **kwargs): if len(args) > 1: diff --git a/docs/api/cassandra/util.rst b/docs/api/cassandra/util.rst new file mode 100644 index 0000000000..2e79758dea --- /dev/null +++ b/docs/api/cassandra/util.rst @@ -0,0 +1,7 @@ +``cassandra.util`` - Utilities +=================================== + +.. module:: cassandra.util + +.. 
autoclass:: OrderedMap + :members: diff --git a/docs/api/index.rst b/docs/api/index.rst index 7db7c7eec3..d65555559d 100644 --- a/docs/api/index.rst +++ b/docs/api/index.rst @@ -16,6 +16,7 @@ API Documentation cassandra/decoder cassandra/concurrent cassandra/connection + cassandra/util cassandra/io/asyncorereactor cassandra/io/libevreactor cassandra/io/geventreactor From 9af1cec1621d3568dd8d10329ca5bf70d05854e5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 16 Jan 2015 08:54:59 -0600 Subject: [PATCH 1146/3726] Events do not return flag state in python 2.6 review updates --- cassandra/connection.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index fa7e72743d..3df5bf9ab8 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -758,7 +758,8 @@ def __init__(self, connection, owner): connection.in_flight += 1 def wait(self, timeout): - if self._event.wait(timeout): + self._event.wait(timeout): + if self._event.is_set(): if self._exception: raise self._exception else: @@ -788,8 +789,8 @@ def __init__(self, interval_sec, get_connection_holders): self.start() def run(self): - elapsed = 0 - while not self._shutdown_event.wait(self._interval - elapsed): + self._shutdown_event.wait(self._interval) + while not self._shutdown_event.is_set(): start_time = time.time() futures = [] @@ -827,6 +828,7 @@ def run(self): log.warning("Failed connection heartbeat", exc_info=True) elapsed = time.time() - start_time + self._shutdown_event.wait(max(self._interval - elapsed, 0.01)) def stop(self): self._shutdown_event.set() From 8433378c9e5c6a75d670a2740f95e9fb42394060 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 16 Jan 2015 09:27:52 -0600 Subject: [PATCH 1147/3726] Stop hearbeat before conn close, Heartbeat idle in_flight check. 
--- cassandra/cluster.py | 6 +++--- cassandra/connection.py | 14 +++++++++----- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index e4b51ab69e..1bad5fb447 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -741,6 +741,9 @@ def shutdown(self): else: self.is_shutdown = True + if self._idle_heartbeat: + self._idle_heartbeat.stop() + self.scheduler.shutdown() self.control_connection.shutdown() @@ -750,9 +753,6 @@ def shutdown(self): self.executor.shutdown() - if self._idle_heartbeat: - self._idle_heartbeat.stop() - def _new_session(self): session = Session(self, self.metadata.all_hosts()) for keyspace, type_map in six.iteritems(self._user_types): diff --git a/cassandra/connection.py b/cassandra/connection.py index 3df5bf9ab8..b603f56b1e 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -679,7 +679,7 @@ def process_result(result): @property def is_idle(self): - return self.in_flight == 0 and not self.msg_received + return not self.msg_received def reset_idle(self): self.msg_received = False @@ -754,11 +754,15 @@ def __init__(self, connection, owner): log.debug("Sending options message heartbeat on idle connection %s %s", id(connection), connection.host) with connection.lock: - connection.send_msg(OptionsMessage(), connection.get_request_id(), self._options_callback) - connection.in_flight += 1 + if connection.in_flight < connection.max_request_id: + connection.in_flight += 1 + connection.send_msg(OptionsMessage(), connection.get_request_id(), self._options_callback) + else: + self._exception = Exception("Failed to send heartbeat because connection 'in_flight' exceeds threshold") + self._event.set() def wait(self, timeout): - self._event.wait(timeout): + self._event.wait(timeout) if self._event.is_set(): if self._exception: raise self._exception @@ -825,7 +829,7 @@ def run(self): connection.defunct(Exception('Connection heartbeat failure')) owner.return_connection(connection) 
except Exception: - log.warning("Failed connection heartbeat", exc_info=True) + log.error("Failed connection heartbeat", exc_info=True) elapsed = time.time() - start_time self._shutdown_event.wait(max(self._interval - elapsed, 0.01)) From 752c3fe5e31cc5734742d9e859b282c1c659f3eb Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 16 Jan 2015 09:59:06 -0600 Subject: [PATCH 1148/3726] Return connection to pool after set keyspace PYTHON-195 Fixes a problem where connection.in_flight count leaks when keyspace changes due to session.set_keyspace or 'USE' statements. --- cassandra/pool.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cassandra/pool.py b/cassandra/pool.py index 587fa27753..56e33fe581 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -355,6 +355,7 @@ def _set_keyspace_for_all_conns(self, keyspace, callback): return def connection_finished_setting_keyspace(conn, error): + self.return_connection(conn) errors = [] if not error else [error] callback(self, errors) @@ -683,6 +684,7 @@ def _set_keyspace_for_all_conns(self, keyspace, callback): return def connection_finished_setting_keyspace(conn, error): + self.return_connection(conn) remaining_callbacks.remove(conn) if error: errors.append(error) From 1139dd87cea713cd07f586d4f0b543091ededb26 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 16 Jan 2015 11:33:50 -0600 Subject: [PATCH 1149/3726] Simplify hex name decoding PYTHON-208 --- cassandra/cqltypes.py | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 5db5f7132b..28556d54d2 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -55,20 +55,13 @@ if six.PY3: _number_types = frozenset((int, float)) long = int -else: - _number_types = frozenset((int, long, float)) - -if six.PY3: - def _unhexlify(s): - if not isinstance(s, six.binary_type): - s = s.encode('ascii') - result = unhexlify(s) - if isinstance(result, six.binary_type): - result = 
result.decode('ascii') - return result + def _name_from_hex_string(encoded_name): + bin_str = unhexlify(encoded_name) + return bin_str.decode('ascii') else: - _unhexlify = unhexlify + _number_types = frozenset((int, long, float)) + _name_from_hex_string = unhexlify def trim_if_startswith(s, prefix): @@ -866,8 +859,8 @@ def make_udt_class(cls, keyspace, udt_name, names_and_types, mapped_class): @classmethod def apply_parameters(cls, subtypes, names): keyspace = subtypes[0] - udt_name = _unhexlify(subtypes[1].cassname) - field_names = [_unhexlify(encoded_name) for encoded_name in names[2:]] + udt_name = _name_from_hex_string(subtypes[1].cassname) + field_names = [_name_from_hex_string(encoded_name) for encoded_name in names[2:]] assert len(field_names) == len(subtypes[2:]) return type(udt_name, (cls,), {'subtypes': subtypes[2:], 'cassname': cls.cassname, From 6bfbe325b6a36dd5e423e1d4a0d4e2086e621d12 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 16 Jan 2015 11:34:22 -0600 Subject: [PATCH 1150/3726] Explicitly turn type_classes into a list for UserTypes Fixes a problem in python3, where a map generator is passed into UserType, resulting in empty parameter list in 'as_cql_query' --- cassandra/metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 3fdce6965a..838f383064 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -211,7 +211,7 @@ def _build_keyspace_metadata(self, row): return KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) def _build_usertype(self, keyspace, usertype_row): - type_classes = map(types.lookup_casstype, usertype_row['field_types']) + type_classes = list(map(types.lookup_casstype, usertype_row['field_types'])) return UserType(usertype_row['keyspace_name'], usertype_row['type_name'], usertype_row['field_names'], type_classes) From fe6cfc1e8c0f12eac24df098641ae7c5568a51f8 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 16 Jan 
2015 10:15:37 -0800 Subject: [PATCH 1151/3726] removed references to create_missing_keyspace #323 --- changelog | 6 ++++++ cqlengine/management.py | 6 +----- docs/topics/manage_schemas.rst | 4 +--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/changelog b/changelog index efc6cb67f8..2eb0ccd64f 100644 --- a/changelog +++ b/changelog @@ -1,5 +1,11 @@ CHANGELOG + +0.21.1 + +removed references to create_missing_keyspace + + 0.21.0 Create keyspace no longer defaults to SimpleStrategy with 3 replicas. It must be manually specified. diff --git a/cqlengine/management.py b/cqlengine/management.py index a6ba47bc81..388efbcd68 100644 --- a/cqlengine/management.py +++ b/cqlengine/management.py @@ -59,7 +59,7 @@ def delete_keyspace(name): if name in cluster.metadata.keyspaces: execute("DROP KEYSPACE {}".format(name)) -def create_table(model, create_missing_keyspace=True): +def create_table(model): raise CQLEngineException("create_table is deprecated, please use sync_table") def sync_table(model): @@ -68,10 +68,6 @@ def sync_table(model): Note that the attributes removed from the model are not deleted on the database. They become effectively ignored by (will not show up on) the model. - - :param create_missing_keyspace: (Defaults to True) Flags to us that we need to create missing keyspace - mentioned in the model automatically. - :type create_missing_keyspace: bool """ if not issubclass(model, Model): diff --git a/docs/topics/manage_schemas.rst b/docs/topics/manage_schemas.rst index 1da5b1c9e9..5cedf8d75b 100644 --- a/docs/topics/manage_schemas.rst +++ b/docs/topics/manage_schemas.rst @@ -26,12 +26,10 @@ Once a connection has been made to Cassandra, you can use the functions in ``cql deletes the keyspace with the given name -.. function:: sync_table(model [, create_missing_keyspace=True]) +.. 
function:: sync_table(model) :param model: the :class:`~cqlengine.model.Model` class to make a table with :type model: :class:`~cqlengine.model.Model` - :param create_missing_keyspace: *Optional* If True, the model's keyspace will be created if it does not already exist. Defaults to ``True`` - :type create_missing_keyspace: bool syncs a python model to cassandra (creates & alters) From 32811f13334f688900fa70b5ce5b5251b8064597 Mon Sep 17 00:00:00 2001 From: Jon Haddad Date: Fri, 16 Jan 2015 10:22:08 -0800 Subject: [PATCH 1152/3726] updated readme --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 0f1b785e41..594675abb0 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ cqlengine is a Cassandra CQL 3 Object Mapper for Python pip install cqlengine ``` -## Getting Started +## Getting Started on your local machine ```python #first, define a model @@ -42,6 +42,10 @@ class ExampleModel(Model): # or if you're still on cassandra 1.2 >>> connection.setup(['127.0.0.1'], "cqlengine", protocol_version=1) +# create your keyspace. 
This is, in general, not what you want in production +# see https://cassandra.apache.org/doc/cql3/CQL.html#createKeyspaceStmt for options +>>> create_keyspace("cqlengine", "SimpleStrategy", 1) + #...and create your CQL table >>> from cqlengine.management import sync_table >>> sync_table(ExampleModel) From 23cf630116b20fcec529ac28dd95f577df82a9c2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 16 Jan 2015 16:36:28 -0600 Subject: [PATCH 1153/3726] Correct parenthesis for util.OrderedMap.__repr__ --- cassandra/util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/util.py b/cassandra/util.py index 1ff1fe75a8..6bc572e84e 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -639,7 +639,7 @@ def __eq__(self, other): return NotImplemented def __repr__(self): - return '%s[%s]' % ( + return '%s([%s])' % ( self.__class__.__name__, ', '.join("(%r, %r)" % (k, v) for k, v in self._items)) From 418947cd619afcfc541c00403f131a18a17c66c2 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sat, 17 Jan 2015 18:12:37 -0500 Subject: [PATCH 1154/3726] Add eventlet reactor, with integration test This is based on code found from the magnetodb project Eventlet needs monkey patching, which must be done early (like gevent). So the integration tests are run by specifying eventlet_nosetests --- cassandra/cluster.py | 13 +- cassandra/io/eventletreactor.py | 195 ++++++++++++++++++ setup.py | 17 ++ tests/integration/standard/test_connection.py | 17 +- 4 files changed, 235 insertions(+), 7 deletions(-) create mode 100644 cassandra/io/eventletreactor.py diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 1bad5fb447..14fff61e22 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -69,10 +69,19 @@ BatchStatement, bind_params, QueryTrace, Statement, named_tuple_factory, dict_factory, FETCH_SIZE_UNSET) -# default to gevent when we are monkey patched, otherwise if libev is available, use that as the -# default because it's fastest. 
Otherwise, use asyncore. +def _is_eventlet_monkey_patched(): + if not 'eventlet.patcher' in sys.modules: + return False + import eventlet.patcher + return eventlet.patcher.is_monkey_patched('socket') + +# default to gevent when we are monkey patched with gevent, eventlet when +# monkey patched with eventlet, otherwise if libev is available, use that as +# the default because it's fastest. Otherwise, use asyncore. if 'gevent.monkey' in sys.modules: from cassandra.io.geventreactor import GeventConnection as DefaultConnection +elif _is_eventlet_monkey_patched(): + from cassandra.io.eventletreactor import EventletConnection as DefaultConnection else: try: from cassandra.io.libevreactor import LibevConnection as DefaultConnection # NOQA diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py new file mode 100644 index 0000000000..ac918ff8a4 --- /dev/null +++ b/cassandra/io/eventletreactor.py @@ -0,0 +1,195 @@ +# Copyright 2014 Symantec Corporation +# Copyright 2013-2014 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import collections +import eventlet +from eventlet.green import select +from eventlet.green import socket +from eventlet import queue +import errno +import functools +from six import moves +import threading + +import logging +import os + +import cassandra +from cassandra import connection as cassandra_connection +from cassandra import protocol as cassandra_protocol + + +log = logging.getLogger(__name__) + + +def is_timeout(err): + return ( + err in (errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK) or + (err == errno.EINVAL and os.name in ('nt', 'ce')) + ) + + +class EventletConnection(cassandra_connection.Connection): + """ + An implementation of :class:`.Connection` that utilizes ``eventlet``. + """ + + _total_reqd_bytes = 0 + _read_watcher = None + _write_watcher = None + _socket = None + + @classmethod + def initialize_reactor(cls): + eventlet.monkey_patch() + + @classmethod + def factory(cls, *args, **kwargs): + timeout = kwargs.pop('timeout', 5.0) + conn = cls(*args, **kwargs) + conn.connected_event.wait(timeout) + if conn.last_error: + raise conn.last_error + elif not conn.connected_event.is_set(): + conn.close() + raise cassandra.OperationTimedOut("Timed out creating connection") + else: + return conn + + def __init__(self, *args, **kwargs): + cassandra_connection.Connection.__init__(self, *args, **kwargs) + + self.connected_event = threading.Event() + self._write_queue = queue.Queue() + + self._callbacks = {} + self._push_watchers = collections.defaultdict(set) + + sockerr = None + addresses = socket.getaddrinfo( + self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM + ) + for (af, socktype, proto, canonname, sockaddr) in addresses: + try: + self._socket = socket.socket(af, socktype, proto) + self._socket.settimeout(1.0) + self._socket.connect(sockaddr) + sockerr = None + break + except socket.error as err: + sockerr = err + if sockerr: + raise socket.error( + sockerr.errno, + "Tried connecting to %s. 
Last error: %s" % ( + [a[4] for a in addresses], sockerr.strerror) + ) + + if self.sockopts: + for args in self.sockopts: + self._socket.setsockopt(*args) + + self._read_watcher = eventlet.spawn(lambda: self.handle_read()) + self._write_watcher = eventlet.spawn(lambda: self.handle_write()) + self._send_options_message() + + def close(self): + with self.lock: + if self.is_closed: + return + self.is_closed = True + + log.debug("Closing connection (%s) to %s" % (id(self), self.host)) + + cur_gthread = eventlet.getcurrent() + + if self._read_watcher and self._read_watcher != cur_gthread: + self._read_watcher.kill() + if self._write_watcher and self._write_watcher != cur_gthread: + self._write_watcher.kill() + if self._socket: + self._socket.close() + log.debug("Closed socket to %s" % (self.host,)) + + if not self.is_defunct: + self.error_all_callbacks( + cassandra_connection.ConnectionShutdown( + "Connection to %s was closed" % self.host + ) + ) + # don't leave in-progress operations hanging + self.connected_event.set() + + def handle_close(self): + log.debug("connection closed by server") + self.close() + + def handle_write(self): + while True: + try: + next_msg = self._write_queue.get() + self._socket.sendall(next_msg) + except socket.error as err: + log.debug("Exception during socket send for %s: %s", self, err) + self.defunct(err) + return # Leave the write loop + + def handle_read(self): + run_select = functools.partial(select.select, (self._socket,), (), ()) + while True: + try: + run_select() + except Exception as exc: + if not self.is_closed: + log.debug("Exception during read select() for %s: %s", + self, exc) + self.defunct(exc) + return + + try: + buf = self._socket.recv(self.in_buffer_size) + self._iobuf.write(buf) + except socket.error as err: + if not is_timeout(err): + log.debug("Exception during socket recv for %s: %s", + self, err) + self.defunct(err) + return # leave the read loop + + if self._iobuf.tell(): + self.process_io_buffer() + else: + 
log.debug("Connection %s closed by server", self) + self.close() + return + + def push(self, data): + chunk_size = self.out_buffer_size + for i in moves.xrange(0, len(data), chunk_size): + self._write_queue.put(data[i:i + chunk_size]) + + def register_watcher(self, event_type, callback, register_timeout=None): + self._push_watchers[event_type].add(callback) + self.wait_for_response( + cassandra_protocol.RegisterMessage(event_list=[event_type]), + timeout=register_timeout) + + def register_watchers(self, type_callback_dict, register_timeout=None): + for event_type, callback in type_callback_dict.items(): + self._push_watchers[event_type].add(callback) + self.wait_for_response( + cassandra_protocol.RegisterMessage( + event_list=type_callback_dict.keys()), + timeout=register_timeout) diff --git a/setup.py b/setup.py index 11cfd4c5e2..ec7f6edd74 100644 --- a/setup.py +++ b/setup.py @@ -20,6 +20,11 @@ from gevent.monkey import patch_all patch_all() +if __name__ == '__main__' and sys.argv[1] == "eventlet_nosetests": + print("Running eventlet tests") + from eventlet import monkey_patch + monkey_patch() + import ez_setup ez_setup.use_setuptools() @@ -56,6 +61,15 @@ class gevent_nosetests(nosetests): description = "run nosetests with gevent monkey patching" +try: + from nose.commands import nosetests +except ImportError: + eventlet_nosetests = None +else: + class eventlet_nosetests(nosetests): + description = "run nosetests with eventlet monkey patching" + + class DocCommand(Command): description = "generate or test documentation" @@ -178,6 +192,9 @@ def run_setup(extensions): if gevent_nosetests is not None: kw['cmdclass']['gevent_nosetests'] = gevent_nosetests + if eventlet_nosetests is not None: + kw['cmdclass']['eventlet_nosetests'] = eventlet_nosetests + if extensions: kw['cmdclass']['build_ext'] = build_extensions kw['ext_modules'] = extensions diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 
d17f692a7f..0e9d8d4f87 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -34,10 +34,16 @@ except ImportError: LibevConnection = None - def setup_module(): use_singledc() +def is_monkey_patched(): + if 'gevent.monkey' in sys.modules: + return True + if 'eventlet.patcher' in sys.modules: + import eventlet + return eventlet.patcher.is_monkey_patched('socket') + return False class ConnectionTests(object): @@ -230,8 +236,8 @@ class AsyncoreConnectionTests(ConnectionTests, unittest.TestCase): klass = AsyncoreConnection def setUp(self): - if 'gevent.monkey' in sys.modules: - raise unittest.SkipTest("Can't test asyncore with gevent monkey patching") + if is_monkey_patched(): + raise unittest.SkipTest("Can't test asyncore with monkey patching") ConnectionTests.setUp(self) @@ -240,9 +246,10 @@ class LibevConnectionTests(ConnectionTests, unittest.TestCase): klass = LibevConnection def setUp(self): - if 'gevent.monkey' in sys.modules: - raise unittest.SkipTest("Can't test libev with gevent monkey patching") + if is_monkey_patched(): + raise unittest.SkipTest("Can't test libev with monkey patching") if LibevConnection is None: raise unittest.SkipTest( 'libev does not appear to be installed properly') ConnectionTests.setUp(self) + From 53d93ddb4f23242952ba64155a353d9c2383c4af Mon Sep 17 00:00:00 2001 From: Wu Jiang Date: Tue, 20 Jan 2015 13:33:29 -0500 Subject: [PATCH 1155/3726] Fixed docstring typo A small typo fix for `set_max_connections_per_host`'s docstring. 
--- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 1bad5fb447..96eb621438 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -633,7 +633,7 @@ def get_max_connections_per_host(self, host_distance): def set_max_connections_per_host(self, host_distance, max_connections): """ - Gets the maximum number of connections per Session that will be opened + Sets the maximum number of connections per Session that will be opened for each host with :class:`~.HostDistance` equal to `host_distance`. The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for :attr:`~HostDistance.REMOTE`. From f92df5b750db212d208642c2d18ea68d7bc147fc Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 20 Jan 2015 13:32:52 -0600 Subject: [PATCH 1156/3726] Make import style match project. --- cassandra/io/eventletreactor.py | 56 ++++++++++++++++----------------- 1 file changed, 27 insertions(+), 29 deletions(-) diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index ac918ff8a4..357d8634e9 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -13,22 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import collections -import eventlet -from eventlet.green import select -from eventlet.green import socket -from eventlet import queue -import errno -import functools -from six import moves -import threading +# Originally derived from MagnetoDB source: +# https://github.com/stackforge/magnetodb/blob/2015.1.0b1/magnetodb/common/cassandra/io/eventletreactor.py +from collections import defaultdict +from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL +import eventlet +from eventlet.green import select, socket +from eventlet.queue import Queue +from functools import partial import logging import os +from threading import Event -import cassandra -from cassandra import connection as cassandra_connection -from cassandra import protocol as cassandra_protocol +from six.moves import xrange + +from cassandra import OperationTimedOut +from cassandra.connection import Connection, ConnectionShutdown +from cassandra.protocol import RegisterMessage log = logging.getLogger(__name__) @@ -36,12 +38,12 @@ def is_timeout(err): return ( - err in (errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK) or - (err == errno.EINVAL and os.name in ('nt', 'ce')) + err in (EINPROGRESS, EALREADY, EWOULDBLOCK) or + (err == EINVAL and os.name in ('nt', 'ce')) ) -class EventletConnection(cassandra_connection.Connection): +class EventletConnection(Connection): """ An implementation of :class:`.Connection` that utilizes ``eventlet``. 
""" @@ -64,18 +66,18 @@ def factory(cls, *args, **kwargs): raise conn.last_error elif not conn.connected_event.is_set(): conn.close() - raise cassandra.OperationTimedOut("Timed out creating connection") + raise OperationTimedOut("Timed out creating connection") else: return conn def __init__(self, *args, **kwargs): - cassandra_connection.Connection.__init__(self, *args, **kwargs) + Connection.__init__(self, *args, **kwargs) - self.connected_event = threading.Event() - self._write_queue = queue.Queue() + self.connected_event = Event() + self._write_queue = Queue() self._callbacks = {} - self._push_watchers = collections.defaultdict(set) + self._push_watchers = defaultdict(set) sockerr = None addresses = socket.getaddrinfo( @@ -125,10 +127,7 @@ def close(self): if not self.is_defunct: self.error_all_callbacks( - cassandra_connection.ConnectionShutdown( - "Connection to %s was closed" % self.host - ) - ) + ConnectionShutdown("Connection to %s was closed" % self.host)) # don't leave in-progress operations hanging self.connected_event.set() @@ -147,7 +146,7 @@ def handle_write(self): return # Leave the write loop def handle_read(self): - run_select = functools.partial(select.select, (self._socket,), (), ()) + run_select = partial(select.select, (self._socket,), (), ()) while True: try: run_select() @@ -177,19 +176,18 @@ def handle_read(self): def push(self, data): chunk_size = self.out_buffer_size - for i in moves.xrange(0, len(data), chunk_size): + for i in xrange(0, len(data), chunk_size): self._write_queue.put(data[i:i + chunk_size]) def register_watcher(self, event_type, callback, register_timeout=None): self._push_watchers[event_type].add(callback) self.wait_for_response( - cassandra_protocol.RegisterMessage(event_list=[event_type]), + RegisterMessage(event_list=[event_type]), timeout=register_timeout) def register_watchers(self, type_callback_dict, register_timeout=None): for event_type, callback in type_callback_dict.items(): 
self._push_watchers[event_type].add(callback) self.wait_for_response( - cassandra_protocol.RegisterMessage( - event_list=type_callback_dict.keys()), + RegisterMessage(event_list=type_callback_dict.keys()), timeout=register_timeout) From 2e905f843e0b80c1f48c0e97882ef5be746cad20 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 20 Jan 2015 13:33:46 -0600 Subject: [PATCH 1157/3726] is_monkey_patched as a common function, use in asyncore unit test. --- tests/__init__.py | 16 ++++++++++++++++ tests/integration/standard/test_connection.py | 17 +++++------------ tests/unit/io/test_asyncorereactor.py | 8 ++++---- 3 files changed, 25 insertions(+), 16 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index 64d56ea853..5de2c7654b 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -13,6 +13,7 @@ # limitations under the License. import logging +import sys log = logging.getLogger() log.setLevel('DEBUG') @@ -21,3 +22,18 @@ handler = logging.StreamHandler() handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s [%(module)s:%(lineno)s]: %(message)s')) log.addHandler(handler) + + +def is_gevent_monkey_patched(): + return 'gevent.monkey' in sys.modules + + +def is_eventlet_monkey_patched(): + if 'eventlet.patcher' in sys.modules: + import eventlet + return eventlet.patcher.is_monkey_patched('socket') + return False + + +def is_monkey_patched(): + return is_gevent_monkey_patched() or is_eventlet_monkey_patched() diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 0e9d8d4f87..15e2b1988b 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from tests.integration import use_singledc, PROTOCOL_VERSION - try: import unittest2 as unittest except ImportError: @@ -21,29 +19,25 @@ from functools import partial from six.moves import range -import sys from threading import Thread, Event from cassandra import ConsistencyLevel, OperationTimedOut from cassandra.cluster import NoHostAvailable -from cassandra.protocol import QueryMessage from cassandra.io.asyncorereactor import AsyncoreConnection +from cassandra.protocol import QueryMessage + +from tests import is_monkey_patched +from tests.integration import use_singledc, PROTOCOL_VERSION try: from cassandra.io.libevreactor import LibevConnection except ImportError: LibevConnection = None + def setup_module(): use_singledc() -def is_monkey_patched(): - if 'gevent.monkey' in sys.modules: - return True - if 'eventlet.patcher' in sys.modules: - import eventlet - return eventlet.patcher.is_monkey_patched('socket') - return False class ConnectionTests(object): @@ -252,4 +246,3 @@ def setUp(self): raise unittest.SkipTest( 'libev does not appear to be installed properly') ConnectionTests.setUp(self) - diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 6acb1ef839..c2138aa5fa 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -31,20 +31,20 @@ from cassandra.connection import (HEADER_DIRECTION_TO_CLIENT, ConnectionException) - +from cassandra.io.asyncorereactor import AsyncoreConnection from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage, ReadyMessage, ServerError) from cassandra.marshal import uint8_pack, uint32_pack, int32_pack -from cassandra.io.asyncorereactor import AsyncoreConnection +from tests import is_monkey_patched class AsyncoreConnectionTest(unittest.TestCase): @classmethod def setUpClass(cls): - if 'gevent.monkey' in sys.modules: - raise unittest.SkipTest("gevent monkey-patching detected") + if is_monkey_patched(): + raise 
unittest.SkipTest("monkey-patching detected") AsyncoreConnection.initialize_reactor() cls.socket_patcher = patch('socket.socket', spec=socket.socket) cls.mock_socket = cls.socket_patcher.start() From 2337cc09092e7a84602d66937b5f0e9028a4686b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 20 Jan 2015 13:55:32 -0600 Subject: [PATCH 1158/3726] Add EventletConnection to API docs. --- docs/api/cassandra/io/eventletreactor.rst | 7 +++++++ docs/api/index.rst | 1 + 2 files changed, 8 insertions(+) create mode 100644 docs/api/cassandra/io/eventletreactor.rst diff --git a/docs/api/cassandra/io/eventletreactor.rst b/docs/api/cassandra/io/eventletreactor.rst new file mode 100644 index 0000000000..1ba742c7e9 --- /dev/null +++ b/docs/api/cassandra/io/eventletreactor.rst @@ -0,0 +1,7 @@ +``cassandra.io.eventletreactor`` - ``eventlet``-compatible Connection +===================================================================== + +.. module:: cassandra.io.eventletreactor + +.. autoclass:: EventletConnection + :members: diff --git a/docs/api/index.rst b/docs/api/index.rst index d65555559d..27aebf0f9a 100644 --- a/docs/api/index.rst +++ b/docs/api/index.rst @@ -18,6 +18,7 @@ API Documentation cassandra/connection cassandra/util cassandra/io/asyncorereactor + cassandra/io/eventletreactor cassandra/io/libevreactor cassandra/io/geventreactor cassandra/io/twistedreactor From 0ddf694a8c446f7c3f4a31eb84dd8d56fcb4ef69 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 20 Jan 2015 14:23:46 -0600 Subject: [PATCH 1159/3726] Refine DateType unit test --- tests/unit/test_types.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 24b1a66724..dc43eda1f9 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -180,11 +180,14 @@ def test_cassandratype_base(self): self.assertEqual(cassandra_type.val, 'randomvaluetocheck') def test_datetype(self): - now_timestamp = time.time() - 
now_datetime = datetime.datetime.utcfromtimestamp(now_timestamp) + now_time_seconds = time.time() + now_datetime = datetime.datetime.utcfromtimestamp(now_time_seconds) - # same results serialized (must scale the timestamp to milliseconds) - self.assertEqual(DateType.serialize(now_datetime, 0), DateType.serialize(now_timestamp * 1e3, 0)) + # Cassandra timestamps in millis + now_timestamp = now_time_seconds * 1e3 + + # same results serialized + self.assertEqual(DateType.serialize(now_datetime, 0), DateType.serialize(now_timestamp, 0)) # from timestamp date_type = DateType(now_timestamp) From a44d1e40017fed0e6acdd458735c86803ae54fb6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 20 Jan 2015 17:22:46 -0600 Subject: [PATCH 1160/3726] Make refresh schema wait parameter match name in Cluster init --- cassandra/cluster.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c6086e8739..2236306909 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1073,16 +1073,16 @@ def _ensure_core_connections(self): for pool in session._pools.values(): pool.ensure_core_connections() - def refresh_schema(self, keyspace=None, table=None, usertype=None, schema_agreement_wait=None): + def refresh_schema(self, keyspace=None, table=None, usertype=None, max_schema_agreement_wait=None): """ Synchronously refresh the schema metadata. By default timeout for this operation is governed by :attr:`~.Cluster.max_schema_agreement_wait` and :attr:`~.Cluster.control_connection_timeout`. - Passing schema_agreement_wait here overrides :attr:`~.Cluster.max_schema_agreement_wait`. - Setting schema_agreement_wait <= 0 will bypass schema agreement and refresh schema immediately. + Passing max_schema_agreement_wait here overrides :attr:`~.Cluster.max_schema_agreement_wait`. + Setting max_schema_agreement_wait <= 0 will bypass schema agreement and refresh schema immediately. 
An Exception is raised if schema refresh fails for any reason. """ - if not self.control_connection.refresh_schema(keyspace, table, usertype, schema_agreement_wait): + if not self.control_connection.refresh_schema(keyspace, table, usertype, max_schema_agreement_wait): raise Exception("Schema was not refreshed. See log for details.") def submit_schema_refresh(self, keyspace=None, table=None, usertype=None): From 4eeb5bced922fc428648b97a4d3080a873232e92 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 20 Jan 2015 17:23:42 -0600 Subject: [PATCH 1161/3726] Tests around Cluster.refresh_schema --- tests/integration/standard/test_cluster.py | 159 ++++++++++++++++++++- 1 file changed, 155 insertions(+), 4 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 957a421434..9bddeedab4 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -12,18 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from tests.integration import use_singledc, PROTOCOL_VERSION - try: import unittest2 as unittest except ImportError: import unittest # noqa +import time +from uuid import uuid4 + import cassandra +from cassandra.cluster import Cluster, NoHostAvailable +from cassandra.policies import (RoundRobinPolicy, ExponentialReconnectionPolicy, + RetryPolicy, SimpleConvictionPolicy, HostDistance, + WhiteListRoundRobinPolicy) from cassandra.query import SimpleStatement, TraceUnavailable -from cassandra.policies import RoundRobinPolicy, ExponentialReconnectionPolicy, RetryPolicy, SimpleConvictionPolicy, HostDistance -from cassandra.cluster import Cluster, NoHostAvailable +from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions def setup_module(): @@ -202,6 +206,153 @@ def test_submit_schema_refresh(self): self.assertIn("newkeyspace", cluster.metadata.keyspaces) + def test_refresh_schema(self): + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + session = cluster.connect() + + original_meta = cluster.metadata.keyspaces + # full schema refresh, with wait + cluster.refresh_schema() + self.assertIsNot(original_meta, cluster.metadata.keyspaces) + self.assertEqual(original_meta, cluster.metadata.keyspaces) + + session.shutdown() + + def test_refresh_schema_keyspace(self): + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + session = cluster.connect() + + original_meta = cluster.metadata.keyspaces + original_system_meta = original_meta['system'] + + # only refresh one keyspace + cluster.refresh_schema(keyspace='system') + current_meta = cluster.metadata.keyspaces + self.assertIs(original_meta, current_meta) + current_system_meta = current_meta['system'] + self.assertIsNot(original_system_meta, current_system_meta) + self.assertEqual(original_system_meta.as_cql_query(), current_system_meta.as_cql_query()) + session.shutdown() + + def test_refresh_schema_table(self): + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + session = cluster.connect() + 
+ original_meta = cluster.metadata.keyspaces + original_system_meta = original_meta['system'] + original_system_schema_meta = original_system_meta.tables['schema_columnfamilies'] + + # only refresh one table + cluster.refresh_schema(keyspace='system', table='schema_columnfamilies') + current_meta = cluster.metadata.keyspaces + current_system_meta = current_meta['system'] + current_system_schema_meta = current_system_meta.tables['schema_columnfamilies'] + self.assertIs(original_meta, current_meta) + self.assertIs(original_system_meta, current_system_meta) + self.assertIsNot(original_system_schema_meta, current_system_schema_meta) + self.assertEqual(original_system_schema_meta.as_cql_query(), current_system_schema_meta.as_cql_query()) + session.shutdown() + + def test_refresh_schema_type(self): + if get_server_versions()[0] < (2, 1, 0): + raise unittest.SkipTest('UDTs were introduced in Cassandra 2.1') + + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + session = cluster.connect() + + keyspace_name = 'test1rf' + type_name = self._testMethodName + + session.execute('CREATE TYPE IF NOT EXISTS %s.%s (one int, two text)' % (keyspace_name, type_name)) + original_meta = cluster.metadata.keyspaces + original_test1rf_meta = original_meta[keyspace_name] + original_type_meta = original_test1rf_meta.user_types[type_name] + + # only refresh one type + cluster.refresh_schema(keyspace='test1rf', usertype=type_name) + current_meta = cluster.metadata.keyspaces + current_test1rf_meta = current_meta[keyspace_name] + current_type_meta = current_test1rf_meta.user_types[type_name] + self.assertIs(original_meta, current_meta) + self.assertIs(original_test1rf_meta, current_test1rf_meta) + self.assertIsNot(original_type_meta, current_type_meta) + self.assertEqual(original_type_meta.as_cql_query(), current_type_meta.as_cql_query()) + session.shutdown() + + def test_refresh_schema_no_wait(self): + + contact_points = ['127.0.0.1'] + cluster = Cluster(protocol_version=PROTOCOL_VERSION, 
max_schema_agreement_wait=10, + contact_points=contact_points, load_balancing_policy=WhiteListRoundRobinPolicy(contact_points)) + session = cluster.connect() + + schema_ver = session.execute("SELECT schema_version FROM system.local WHERE key='local'")[0][0] + + # create a schema disagreement + session.execute("UPDATE system.local SET schema_version=%s WHERE key='local'", (uuid4(),)) + + try: + agreement_timeout = 1 + + # cluster agreement wait exceeded + c = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=agreement_timeout) + start_time = time.time() + s = c.connect() + end_time = time.time() + self.assertGreaterEqual(end_time - start_time, agreement_timeout) + self.assertTrue(c.metadata.keyspaces) + + # cluster agreement wait used for refresh + original_meta = c.metadata.keyspaces + start_time = time.time() + self.assertRaisesRegexp(Exception, r"Schema was not refreshed.*", c.refresh_schema) + end_time = time.time() + self.assertGreaterEqual(end_time - start_time, agreement_timeout) + self.assertIs(original_meta, c.metadata.keyspaces) + + # refresh wait overrides cluster value + original_meta = c.metadata.keyspaces + start_time = time.time() + c.refresh_schema(max_schema_agreement_wait=0) + end_time = time.time() + self.assertLess(end_time - start_time, agreement_timeout) + self.assertIsNot(original_meta, c.metadata.keyspaces) + self.assertEqual(original_meta, c.metadata.keyspaces) + + s.shutdown() + + refresh_threshold = 0.5 + # cluster agreement bypass + c = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=0) + start_time = time.time() + s = c.connect() + end_time = time.time() + self.assertLess(end_time - start_time, refresh_threshold) + self.assertTrue(c.metadata.keyspaces) + + # cluster agreement wait used for refresh + original_meta = c.metadata.keyspaces + start_time = time.time() + c.refresh_schema() + end_time = time.time() + self.assertLess(end_time - start_time, refresh_threshold) + 
self.assertIsNot(original_meta, c.metadata.keyspaces) + self.assertEqual(original_meta, c.metadata.keyspaces) + + # refresh wait overrides cluster value + original_meta = c.metadata.keyspaces + start_time = time.time() + self.assertRaisesRegexp(Exception, r"Schema was not refreshed.*", c.refresh_schema, max_schema_agreement_wait=agreement_timeout) + end_time = time.time() + self.assertGreaterEqual(end_time - start_time, agreement_timeout) + self.assertIs(original_meta, c.metadata.keyspaces) + + s.shutdown() + finally: + session.execute("UPDATE system.local SET schema_version=%s WHERE key='local'", (schema_ver,)) + + session.shutdown() + def test_trace(self): """ Ensure trace can be requested for async and non-async queries From bb1155e18077da36276cd9545de1bdc4d1a0ff7e Mon Sep 17 00:00:00 2001 From: andy8zhao Date: Wed, 21 Jan 2015 09:07:30 -0500 Subject: [PATCH 1162/3726] 1) fix save/update/delete for static column changes only when clustering key is null, and 2) allow null clustering key for create() when setting static column values only --- cqlengine/columns.py | 5 ++-- cqlengine/models.py | 7 ++++- cqlengine/query.py | 27 +++++++++++++++---- .../tests/columns/test_container_columns.py | 2 +- cqlengine/tests/columns/test_static_column.py | 24 ++++++++++++++--- 5 files changed, 51 insertions(+), 14 deletions(-) diff --git a/cqlengine/columns.py b/cqlengine/columns.py index e5d63baef0..bb2d77bb87 100644 --- a/cqlengine/columns.py +++ b/cqlengine/columns.py @@ -26,6 +26,7 @@ def __init__(self, instance, column, value): self.column = column self.previous_value = deepcopy(value) self.value = value + self.explicit = False @property def deleted(self): @@ -140,9 +141,7 @@ def validate(self, value): if there's a problem """ if value is None: - if self.has_default: - return self.get_default() - elif self.required: + if self.required: raise ValidationError('{} - None values are not allowed'.format(self.column_name or self.db_field)) return value diff --git 
a/cqlengine/models.py b/cqlengine/models.py index d7f928dda9..d541f1a962 100644 --- a/cqlengine/models.py +++ b/cqlengine/models.py @@ -336,6 +336,8 @@ def __init__(self, **values): if value is not None or isinstance(column, columns.BaseContainerColumn): value = column.to_python(value) value_mngr = column.value_manager(self, column, value) + if name in values: + value_mngr.explicit = True self._values[name] = value_mngr # a flag set by the deserializer to indicate @@ -490,7 +492,10 @@ def column_family_name(cls, include_keyspace=True): def validate(self): """ Cleans and validates the field values """ for name, col in self._columns.items(): - val = col.validate(getattr(self, name)) + v = getattr(self, name) + if v is None and not self._values[name].explicit and col.has_default: + v = col.get_default() + val = col.validate(v) setattr(self, name, val) ### Let an instance be used like a dict of its columns keys/values diff --git a/cqlengine/query.py b/cqlengine/query.py index 5f4c2cb593..2877453a6a 100644 --- a/cqlengine/query.py +++ b/cqlengine/query.py @@ -806,7 +806,9 @@ def update(self, **values): if col.is_primary_key: raise ValidationError("Cannot apply update to primary key '{}' for {}.{}".format(col_name, self.__module__, self.model.__name__)) + # we should not provide default values in this use case. 
val = col.validate(val) + if val is None: nulled_columns.add(col_name) continue @@ -914,11 +916,17 @@ def update(self): if self.instance is None: raise CQLEngineException("DML Query intance attribute is None") assert type(self.instance) == self.model - static_update_only = True + null_clustering_key = False if len(self.instance._clustering_keys) == 0 else True + static_changed_only = True statement = UpdateStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, transactions=self._transaction) + for name, col in self.instance._clustering_keys.items(): + null_clustering_key = null_clustering_key and col._val_is_null(getattr(self.instance, name, None)) #get defined fields and their column names for name, col in self.model._columns.items(): + # if clustering key is null, don't include non static columns + if null_clustering_key and not col.static and not col.partition_key: + continue if not col.is_primary_key: val = getattr(self.instance, name, None) val_mgr = self.instance._values[name] @@ -931,7 +939,7 @@ def update(self): if not val_mgr.changed and not isinstance(col, Counter): continue - static_update_only = (static_update_only and col.static) + static_changed_only = static_changed_only and col.static if isinstance(col, (BaseContainerColumn, Counter)): # get appropriate clause if isinstance(col, List): klass = ListUpdateClause @@ -953,7 +961,8 @@ def update(self): if statement.get_context_size() > 0 or self.instance._has_counter: for name, col in self.model._primary_keys.items(): - if static_update_only and (not col.partition_key): + # only include clustering key if clustering key is not null, and non static columns are changed to avoid cql error + if (null_clustering_key or static_changed_only) and (not col.partition_key): continue statement.add_where_clause(WhereClause( col.db_field_name, @@ -962,7 +971,8 @@ def update(self): )) self._execute(statement) - self._delete_null_columns() + if not null_clustering_key: + self._delete_null_columns() 
def save(self): """ @@ -980,7 +990,12 @@ def save(self): return self.update() else: insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, if_not_exists=self._if_not_exists) + static_save_only = False if len(self.instance._clustering_keys) == 0 else True + for name, col in self.instance._clustering_keys.items(): + static_save_only = static_save_only and col._val_is_null(getattr(self.instance, name, None)) for name, col in self.instance._columns.items(): + if static_save_only and not col.static and not col.partition_key: + continue val = getattr(self.instance, name, None) if col._val_is_null(val): if self.instance._values[name].changed: @@ -996,7 +1011,8 @@ def save(self): if not insert.is_empty: self._execute(insert) # delete any nulled columns - self._delete_null_columns() + if not static_save_only: + self._delete_null_columns() def delete(self): """ Deletes one instance """ @@ -1005,6 +1021,7 @@ def delete(self): ds = DeleteStatement(self.column_family_name, timestamp=self._timestamp) for name, col in self.model._primary_keys.items(): + if (not col.partition_key) and (getattr(self.instance, name) is None): continue ds.add_where_clause(WhereClause( col.db_field_name, EqualsOperator(), diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py index f26962ed27..f8c0db595b 100644 --- a/cqlengine/tests/columns/test_container_columns.py +++ b/cqlengine/tests/columns/test_container_columns.py @@ -529,4 +529,4 @@ def tearDownClass(cls): drop_table(TestCamelMapModel) def test_camelcase_column(self): - TestCamelMapModel.create(partition=None, camelMap={'blah': 1}) + TestCamelMapModel.create(camelMap={'blah': 1}) diff --git a/cqlengine/tests/columns/test_static_column.py b/cqlengine/tests/columns/test_static_column.py index d25c0cf0f9..cf07378416 100644 --- a/cqlengine/tests/columns/test_static_column.py +++ b/cqlengine/tests/columns/test_static_column.py @@ -1,3 +1,6 @@ +#import 
sys, nose +#sys.path.insert(0, '/Users/andy/projects/cqlengine') + from uuid import uuid4 from unittest import skipUnless from cqlengine import Model @@ -9,9 +12,8 @@ class TestStaticModel(Model): - partition = columns.UUID(primary_key=True, default=uuid4) - cluster = columns.UUID(primary_key=True, default=uuid4) + cluster = columns.UUID(primary_key=True) static = columns.Text(static=True) text = columns.Text() @@ -33,7 +35,7 @@ def tearDownClass(cls): @skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_mixed_updates(self): """ Tests that updates on both static and non-static columns work as intended """ - instance = TestStaticModel.create() + instance = TestStaticModel.create(cluster=uuid4()) instance.static = "it's shared" instance.text = "some text" instance.save() @@ -49,7 +51,7 @@ def test_mixed_updates(self): @skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_static_only_updates(self): """ Tests that updates on static only column work as intended """ - instance = TestStaticModel.create() + instance = TestStaticModel.create(cluster=uuid4()) instance.static = "it's shared" instance.text = "some text" instance.save() @@ -60,3 +62,17 @@ def test_static_only_updates(self): actual = TestStaticModel.get(partition=u.partition) assert actual.static == "it's still shared" + @skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") + def test_static_with_null_cluster_key(self): + """ Tests that save/update/delete works for static column works when clustering key is null""" + instance = TestStaticModel.create(cluster=None, static = "it's shared") + instance.save() + + u = TestStaticModel.get(partition=instance.partition) + u.static = "it's still shared" + u.update() + actual = TestStaticModel.get(partition=u.partition) + assert actual.static == "it's still shared" + +#if __name__ == '__main__': +# nose.main() \ No newline at end of file From 
b45ecabc2c729d65d3a2e6fd8967b74262a48e2a Mon Sep 17 00:00:00 2001 From: andy8zhao Date: Wed, 21 Jan 2015 09:22:15 -0500 Subject: [PATCH 1163/3726] changed test case --- cqlengine/tests/columns/test_static_column.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cqlengine/tests/columns/test_static_column.py b/cqlengine/tests/columns/test_static_column.py index cf07378416..1664af188d 100644 --- a/cqlengine/tests/columns/test_static_column.py +++ b/cqlengine/tests/columns/test_static_column.py @@ -13,7 +13,7 @@ class TestStaticModel(Model): partition = columns.UUID(primary_key=True, default=uuid4) - cluster = columns.UUID(primary_key=True) + cluster = columns.UUID(primary_key=True, default=uuid4) static = columns.Text(static=True) text = columns.Text() From fcad88871ecb5a75e85c4db7c83c2a49dcbbae75 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 21 Jan 2015 13:47:38 -0600 Subject: [PATCH 1164/3726] ConnectionHeartbeat unit tests --- cassandra/connection.py | 2 +- tests/unit/test_connection.py | 164 ++++++++++++++++++++++++++++++++-- 2 files changed, 159 insertions(+), 7 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index b603f56b1e..a7d98ecb3e 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -751,7 +751,7 @@ def __init__(self, connection, owner): self._event = Event() self.connection = connection self.owner = owner - log.debug("Sending options message heartbeat on idle connection %s %s", + log.debug("Sending options message heartbeat on idle connection (%s) %s", id(connection), connection.host) with connection.lock: if connection.in_flight < connection.max_request_id: diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 6bc50c2282..39ef8ce5c0 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -11,21 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. -import six - try: import unittest2 as unittest except ImportError: - import unittest # noqa + import unittest # noqa +from mock import Mock, ANY, call +import six from six import BytesIO - -from mock import Mock, ANY +import time +from threading import Lock from cassandra.cluster import Cluster from cassandra.connection import (Connection, HEADER_DIRECTION_TO_CLIENT, HEADER_DIRECTION_FROM_CLIENT, ProtocolError, - locally_supported_compressions) + locally_supported_compressions, ConnectionHeartbeat) from cassandra.marshal import uint8_pack, uint32_pack from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage) @@ -280,3 +280,155 @@ def test_set_keyspace_blocking(self): def test_set_connection_class(self): cluster = Cluster(connection_class='test') self.assertEqual('test', cluster.connection_class) + + +class ConnectionHeartbeatTest(unittest.TestCase): + + @staticmethod + def make_get_holders(len): + holders = [] + for _ in range(len): + holder = Mock() + holder.get_connections = Mock(return_value=[]) + holders.append(holder) + get_holders = Mock(return_value=holders) + return get_holders + + def run_heartbeat(self, get_holders_fun, count=2, interval=0.05): + ch = ConnectionHeartbeat(interval, get_holders_fun) + time.sleep(interval * count) + ch.stop() + ch.join() + self.assertTrue(get_holders_fun.call_count) + + def test_empty_connections(self): + count = 3 + get_holders = self.make_get_holders(1) + + self.run_heartbeat(get_holders, count) + + self.assertGreaterEqual(get_holders.call_count, count - 1) # lower bound to account for thread spinup time + self.assertLessEqual(get_holders.call_count, count) + holder = get_holders.return_value[0] + holder.get_connections.assert_has_calls([call()] * get_holders.call_count) + + def test_idle_non_idle(self): + request_id = 999 + + # connection.send_msg(OptionsMessage(), 
connection.get_request_id(), self._options_callback) + def send_msg(msg, req_id, msg_callback): + msg_callback(SupportedMessage([], {})) + + idle_connection = Mock(spec=Connection, host='localhost', + max_request_id=127, + lock=Lock(), + in_flight=0, is_idle=True, + is_defunct=False, is_closed=False, + get_request_id=lambda: request_id, + send_msg=Mock(side_effect=send_msg)) + non_idle_connection = Mock(spec=Connection, in_flight=0, is_idle=False, is_defunct=False, is_closed=False) + + get_holders = self.make_get_holders(1) + holder = get_holders.return_value[0] + holder.get_connections.return_value.append(idle_connection) + holder.get_connections.return_value.append(non_idle_connection) + + self.run_heartbeat(get_holders) + + holder.get_connections.assert_has_calls([call()] * get_holders.call_count) + self.assertEqual(idle_connection.in_flight, 0) + self.assertEqual(non_idle_connection.in_flight, 0) + + idle_connection.send_msg.assert_has_calls([call(ANY, request_id, ANY)] * get_holders.call_count) + self.assertEqual(non_idle_connection.send_msg.call_count, 0) + + def test_closed_defunct(self): + get_holders = self.make_get_holders(1) + closed_connection = Mock(spec=Connection, in_flight=0, is_idle=False, is_defunct=False, is_closed=True) + defunct_connection = Mock(spec=Connection, in_flight=0, is_idle=False, is_defunct=True, is_closed=False) + holder = get_holders.return_value[0] + holder.get_connections.return_value.append(closed_connection) + holder.get_connections.return_value.append(defunct_connection) + + self.run_heartbeat(get_holders) + + holder.get_connections.assert_has_calls([call()] * get_holders.call_count) + self.assertEqual(closed_connection.in_flight, 0) + self.assertEqual(defunct_connection.in_flight, 0) + self.assertEqual(closed_connection.send_msg.call_count, 0) + self.assertEqual(defunct_connection.send_msg.call_count, 0) + + def test_no_req_ids(self): + in_flight = 3 + + get_holders = self.make_get_holders(1) + max_connection = 
Mock(spec=Connection, host='localhost', + max_request_id=in_flight, in_flight=in_flight, + is_idle=True, is_defunct=False, is_closed=False) + holder = get_holders.return_value[0] + holder.get_connections.return_value.append(max_connection) + + self.run_heartbeat(get_holders) + + holder.get_connections.assert_has_calls([call()] * get_holders.call_count) + self.assertEqual(max_connection.in_flight, in_flight) + self.assertEqual(max_connection.send_msg.call_count, 0) + self.assertEqual(max_connection.send_msg.call_count, 0) + max_connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count) + holder.return_connection.assert_has_calls([call(max_connection)] * get_holders.call_count) + + def test_unexpected_response(self): + request_id = 999 + + get_holders = self.make_get_holders(1) + + def send_msg(msg, req_id, msg_callback): + msg_callback(object()) + + connection = Mock(spec=Connection, host='localhost', + max_request_id=127, + lock=Lock(), + in_flight=0, is_idle=True, + is_defunct=False, is_closed=False, + get_request_id=lambda: request_id, + send_msg=Mock(side_effect=send_msg)) + holder = get_holders.return_value[0] + holder.get_connections.return_value.append(connection) + + self.run_heartbeat(get_holders) + + self.assertEqual(connection.in_flight, get_holders.call_count) + connection.send_msg.assert_has_calls([call(ANY, request_id, ANY)] * get_holders.call_count) + connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count) + exc = connection.defunct.call_args_list[0][0][0] + self.assertIsInstance(exc, Exception) + self.assertEqual(exc.args, Exception('Connection heartbeat failure').args) + holder.return_connection.assert_has_calls([call(connection)] * get_holders.call_count) + + def test_timeout(self): + request_id = 999 + + get_holders = self.make_get_holders(1) + + def send_msg(msg, req_id, msg_callback): + pass + + connection = Mock(spec=Connection, host='localhost', + max_request_id=127, + lock=Lock(), + in_flight=0, is_idle=True, 
+ is_defunct=False, is_closed=False, + get_request_id=lambda: request_id, + send_msg=Mock(side_effect=send_msg)) + holder = get_holders.return_value[0] + holder.get_connections.return_value.append(connection) + + self.run_heartbeat(get_holders) + + self.assertEqual(connection.in_flight, get_holders.call_count) + connection.send_msg.assert_has_calls([call(ANY, request_id, ANY)] * get_holders.call_count) + connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count) + exc = connection.defunct.call_args_list[0][0][0] + self.assertIsInstance(exc, Exception) + self.assertEqual(exc.args, Exception('Connection heartbeat failure').args) + holder.return_connection.assert_has_calls([call(connection)] * get_holders.call_count) From d5e5572eeec7f7439a1d36365f245be449514432 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 21 Jan 2015 13:50:50 -0600 Subject: [PATCH 1165/3726] Minor cleanup --- cassandra/cluster.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 2236306909..2c5e45c34b 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -39,7 +39,7 @@ from cassandra.util import WeakSet # NOQA from functools import partial, wraps -from itertools import groupby, chain +from itertools import groupby from cassandra import (ConsistencyLevel, AuthenticationFailed, InvalidRequest, OperationTimedOut, @@ -69,8 +69,9 @@ BatchStatement, bind_params, QueryTrace, Statement, named_tuple_factory, dict_factory, FETCH_SIZE_UNSET) + def _is_eventlet_monkey_patched(): - if not 'eventlet.patcher' in sys.modules: + if 'eventlet.patcher' not in sys.modules: return False import eventlet.patcher return eventlet.patcher.is_monkey_patched('socket') From 2acfcbe885a402759e853ac1d8e75b4f3570b84b Mon Sep 17 00:00:00 2001 From: andy8zhao Date: Wed, 21 Jan 2015 16:22:02 -0500 Subject: [PATCH 1166/3726] rollback unwanted changes to test cases --- cqlengine/tests/columns/test_static_column.py | 10 ++-------- 1 
file changed, 2 insertions(+), 8 deletions(-) diff --git a/cqlengine/tests/columns/test_static_column.py b/cqlengine/tests/columns/test_static_column.py index 1664af188d..d7e6dc9b81 100644 --- a/cqlengine/tests/columns/test_static_column.py +++ b/cqlengine/tests/columns/test_static_column.py @@ -1,6 +1,3 @@ -#import sys, nose -#sys.path.insert(0, '/Users/andy/projects/cqlengine') - from uuid import uuid4 from unittest import skipUnless from cqlengine import Model @@ -35,7 +32,7 @@ def tearDownClass(cls): @skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_mixed_updates(self): """ Tests that updates on both static and non-static columns work as intended """ - instance = TestStaticModel.create(cluster=uuid4()) + instance = TestStaticModel.create() instance.static = "it's shared" instance.text = "some text" instance.save() @@ -51,7 +48,7 @@ def test_mixed_updates(self): @skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_static_only_updates(self): """ Tests that updates on static only column work as intended """ - instance = TestStaticModel.create(cluster=uuid4()) + instance = TestStaticModel.create() instance.static = "it's shared" instance.text = "some text" instance.save() @@ -73,6 +70,3 @@ def test_static_with_null_cluster_key(self): u.update() actual = TestStaticModel.get(partition=u.partition) assert actual.static == "it's still shared" - -#if __name__ == '__main__': -# nose.main() \ No newline at end of file From badad6d40fb8e39999d7ae252d862db0212029c9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 21 Jan 2015 15:38:21 -0600 Subject: [PATCH 1167/3726] Integration tests for idle connection heartbeat. 
--- tests/integration/standard/test_cluster.py | 93 ++++++++++++++++++++++ 1 file changed, 93 insertions(+) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 9bddeedab4..7c23d81596 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -17,11 +17,14 @@ except ImportError: import unittest # noqa +from collections import deque +from mock import patch import time from uuid import uuid4 import cassandra from cassandra.cluster import Cluster, NoHostAvailable +from cassandra.concurrent import execute_concurrent from cassandra.policies import (RoundRobinPolicy, ExponentialReconnectionPolicy, RetryPolicy, SimpleConvictionPolicy, HostDistance, WhiteListRoundRobinPolicy) @@ -70,6 +73,8 @@ def test_basic(self): result = session.execute("SELECT * FROM clustertests.cf0") self.assertEqual([('a', 'b', 'c')], result) + session.execute("DROP KEYSPACE clustertests") + cluster.shutdown() def test_connect_on_keyspace(self): @@ -206,6 +211,8 @@ def test_submit_schema_refresh(self): self.assertIn("newkeyspace", cluster.metadata.keyspaces) + session.execute("DROP KEYSPACE newkeyspace") + def test_refresh_schema(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() @@ -257,6 +264,10 @@ def test_refresh_schema_type(self): if get_server_versions()[0] < (2, 1, 0): raise unittest.SkipTest('UDTs were introduced in Cassandra 2.1') + if PROTOCOL_VERSION < 3: + raise unittest.SkipTest('UDTs are not specified in change events for protocol v2') + # We may want to refresh types on keyspace change events in that case(?) 
+ cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() @@ -422,3 +433,85 @@ def test_string_coverage(self): self.assertIn(query, str(future)) self.assertIn('result', str(future)) + + def test_idle_heartbeat(self): + interval = 1 + cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=interval) + if PROTOCOL_VERSION < 3: + cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) + session = cluster.connect() + + # This test relies on impl details of connection req id management to see if heartbeats + # are being sent. May need update if impl is changed + connection_request_ids = {} + for h in cluster.get_connection_holders(): + for c in h.get_connections(): + # make sure none are idle (should have startup messages) + self.assertFalse(c.is_idle) + with c.lock: + connection_request_ids[id(c)] = deque(c.request_ids) # copy of request ids + + # let two heatbeat intervals pass (first one had startup messages in it) + time.sleep(2 * interval + interval/10.) 
+ + connections = [c for holders in cluster.get_connection_holders() for c in holders.get_connections()] + + # make sure requests were sent on all connections + for c in connections: + expected_ids = connection_request_ids[id(c)] + expected_ids.rotate(-1) + with c.lock: + self.assertListEqual(list(c.request_ids), list(expected_ids)) + + # assert idle status + self.assertTrue(all(c.is_idle for c in connections)) + + # send messages on all connections + statements_and_params = [("SELECT release_version FROM system.local", ())] * len(cluster.metadata.all_hosts()) + results = execute_concurrent(session, statements_and_params) + for success, result in results: + self.assertTrue(success) + + # assert not idle status + self.assertFalse(any(c.is_idle if not c.is_control_connection else False for c in connections)) + + # holders include session pools and cc + holders = cluster.get_connection_holders() + self.assertIn(cluster.control_connection, holders) + self.assertEqual(len(holders), len(cluster.metadata.all_hosts()) + 1) # hosts pools, 1 for cc + + # include additional sessions + session2 = cluster.connect() + + holders = cluster.get_connection_holders() + self.assertIn(cluster.control_connection, holders) + self.assertEqual(len(holders), 2 * len(cluster.metadata.all_hosts()) + 1) # 2 sessions' hosts pools, 1 for cc + + # exclude removed sessions + session2.shutdown() + del session2 + + holders = cluster.get_connection_holders() + self.assertIn(cluster.control_connection, holders) + self.assertEqual(len(holders), len(cluster.metadata.all_hosts()) + 1) # hosts pools, 1 for cc + + session.shutdown() + + @patch('cassandra.cluster.Cluster.idle_heartbeat_interval', new=0.1) + def test_idle_heartbeat_disabled(self): + self.assertTrue(Cluster.idle_heartbeat_interval) + + # heartbeat disabled with '0' + cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=0) + self.assertEqual(cluster.idle_heartbeat_interval, 0) + session = cluster.connect() + + # let two 
heatbeat intervals pass (first one had startup messages in it) + time.sleep(2 * Cluster.idle_heartbeat_interval) + + connections = [c for holders in cluster.get_connection_holders() for c in holders.get_connections()] + + # assert not idle status (should never get reset because there is not heartbeat) + self.assertFalse(any(c.is_idle for c in connections)) + + session.shutdown() From a67d58a5a6c56cf08bb2962ca6fcfac68a7f6201 Mon Sep 17 00:00:00 2001 From: Mission Liao Date: Thu, 22 Jan 2015 10:07:41 +0800 Subject: [PATCH 1168/3726] refine usage of assertRaises --- cqlengine/tests/test_ifnotexists.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/cqlengine/tests/test_ifnotexists.py b/cqlengine/tests/test_ifnotexists.py index 6b65dd2593..dc175f87e2 100644 --- a/cqlengine/tests/test_ifnotexists.py +++ b/cqlengine/tests/test_ifnotexists.py @@ -63,10 +63,8 @@ def test_insert_if_not_exists_success(self): id = uuid4() TestIfNotExistsModel.create(id=id, count=8, text='123456789') - self.assertRaises( - LWTException, - TestIfNotExistsModel.if_not_exists().create, id=id, count=9, text='111111111111' - ) + with self.assertRaises(LWTException): + TestIfNotExistsModel.if_not_exists().create(id=id, count=9, text='111111111111') q = TestIfNotExistsModel.objects(id=id) self.assertEqual(len(q), 1) @@ -101,7 +99,8 @@ def test_batch_insert_if_not_exists_success(self): b = BatchQuery() TestIfNotExistsModel.batch(b).if_not_exists().create(id=id, count=9, text='111111111111') - self.assertRaises(LWTException, b.execute) + with self.assertRaises(LWTException): + b.execute() q = TestIfNotExistsModel.objects(id=id) self.assertEqual(len(q), 1) @@ -196,7 +195,6 @@ def test_instance_raise_exception(self): if_not_exists on table with counter column """ id = uuid4() - self.assertRaises( - IfNotExistsWithCounterColumn, - TestIfNotExistsWithCounterModel.if_not_exists - ) + with self.assertRaises(IfNotExistsWithCounterColumn): + 
TestIfNotExistsWithCounterModel.if_not_exists() + From 61a0ec316065d07f806b6c27d36ab67cd626380d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 22 Jan 2015 08:30:41 -0600 Subject: [PATCH 1169/3726] six.moves.range in place of xrange --- cassandra/protocol.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 4ac65f5acb..af279ef959 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -629,14 +629,14 @@ def read_type(cls, f, user_type_map): typeclass = typeclass.apply_parameters((keysubtype, valsubtype)) elif typeclass == TupleType: num_items = read_short(f) - types = tuple(cls.read_type(f, user_type_map) for _ in xrange(num_items)) + types = tuple(cls.read_type(f, user_type_map) for _ in range(num_items)) typeclass = typeclass.apply_parameters(types) elif typeclass == UserType: ks = read_string(f) udt_name = read_string(f) num_fields = read_short(f) names_and_types = tuple((read_string(f), cls.read_type(f, user_type_map)) - for _ in xrange(num_fields)) + for _ in range(num_fields)) mapped_class = user_type_map.get(ks, {}).get(udt_name) typeclass = typeclass.make_udt_class( ks, udt_name, names_and_types, mapped_class) From a2ed58e72dae184f7c43461b36fdf762efb1ce5e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 22 Jan 2015 09:24:10 -0600 Subject: [PATCH 1170/3726] Improve message on bind type error. 
Also updated integration/standard/test_types to work in python3 --- cassandra/query.py | 4 ++-- tests/integration/standard/test_types.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cassandra/query.py b/cassandra/query.py index 1b0a56d4a8..c159e7877b 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -500,13 +500,13 @@ def bind(self, values): try: self.values.append(col_type.serialize(value, proto_version)) - except (TypeError, struct.error): + except (TypeError, struct.error) as exc: col_name = col_spec[2] expected_type = col_type actual_type = type(value) message = ('Received an argument of invalid type for column "%s". ' - 'Expected: %s, Got: %s' % (col_name, expected_type, actual_type)) + 'Expected: %s, Got: %s; (%s)' % (col_name, expected_type, actual_type, exc)) raise TypeError(message) return self diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index b0a47c06eb..b10f3e6be8 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -700,10 +700,10 @@ def test_tuples_with_nulls(self): self.assertEquals((None, None, None, None), s.execute(read)[0].t) # also test empty strings where compatible - s.execute(insert, [('', None, None, '')]) + s.execute(insert, [('', None, None, b'')]) result = s.execute("SELECT * FROM mytable WHERE k=0") - self.assertEquals(('', None, None, ''), result[0].t) - self.assertEquals(('', None, None, ''), s.execute(read)[0].t) + self.assertEquals(('', None, None, b''), result[0].t) + self.assertEquals(('', None, None, b''), s.execute(read)[0].t) c.shutdown() From ad920171d769524c6b031f6438b7592a82940505 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 22 Jan 2015 17:59:04 -0600 Subject: [PATCH 1171/3726] Register udt namedtuple types in cassandra.cqltypes This provides type visibility for pickling when deserializing unregistered udts as keys to maps. 
--- cassandra/cqltypes.py | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 4379563fcd..8a06560c4c 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -31,14 +31,14 @@ from binascii import unhexlify import calendar from collections import namedtuple +from datetime import datetime, timedelta from decimal import Decimal import io import re import socket import time -from datetime import datetime, timedelta +import sys from uuid import UUID -import warnings import six from six.moves import range @@ -823,22 +823,23 @@ class UserType(TupleType): typename = "'org.apache.cassandra.db.marshal.UserType'" _cache = {} + _module = sys.modules[__name__] @classmethod def make_udt_class(cls, keyspace, udt_name, names_and_types, mapped_class): if six.PY2 and isinstance(udt_name, unicode): udt_name = udt_name.encode('utf-8') - try: return cls._cache[(keyspace, udt_name)] except KeyError: - fieldnames, types = zip(*names_and_types) + field_names, types = zip(*names_and_types) instance = type(udt_name, (cls,), {'subtypes': types, 'cassname': cls.cassname, 'typename': udt_name, - 'fieldnames': fieldnames, + 'fieldnames': field_names, 'keyspace': keyspace, - 'mapped_class': mapped_class}) + 'mapped_class': mapped_class, + 'tuple_type': cls._make_registered_udt_namedtuple(keyspace, udt_name, field_names)}) cls._cache[(keyspace, udt_name)] = instance return instance @@ -853,7 +854,8 @@ def apply_parameters(cls, subtypes, names): 'typename': udt_name, 'fieldnames': field_names, 'keyspace': keyspace, - 'mapped_class': None}) + 'mapped_class': None, + 'tuple_type': namedtuple(udt_name, field_names)}) @classmethod def cql_parameterized_type(cls): @@ -885,8 +887,7 @@ def deserialize_safe(cls, byts, protocol_version): if cls.mapped_class: return cls.mapped_class(**dict(zip(cls.fieldnames, values))) else: - Result = namedtuple(cls.typename, cls.fieldnames) - return Result(*values) 
+ return cls.tuple_type(*values) @classmethod def serialize_safe(cls, val, protocol_version): @@ -902,6 +903,18 @@ def serialize_safe(cls, val, protocol_version): buf.write(int32_pack(-1)) return buf.getvalue() + @classmethod + def _make_registered_udt_namedtuple(cls, keyspace, name, field_names): + # this is required to make the type resolvable via this module... + # required when unregistered udts are pickled for use as keys in + # util.OrderedMap + qualified_name = "%s_%s" % (keyspace, name) + nt = getattr(cls._module, qualified_name, None) + if not nt: + nt = namedtuple(qualified_name, field_names) + setattr(cls._module, qualified_name, nt) + return nt + class CompositeType(_ParameterizedType): typename = "'org.apache.cassandra.db.marshal.CompositeType'" From d6aa371202054f2a3b4a69cdb880ad9937f2367b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 22 Jan 2015 18:00:49 -0600 Subject: [PATCH 1172/3726] Nested collections and UDTs integration test --- tests/integration/standard/test_types.py | 65 ++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index b10f3e6be8..f86cf6e7b9 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -21,8 +21,10 @@ import logging log = logging.getLogger(__name__) +from collections import namedtuple from decimal import Decimal from datetime import datetime +from functools import partial import six from uuid import uuid1, uuid4 @@ -34,6 +36,10 @@ from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION +# defined in module scope for pickling in OrderedMap +nested_collection_udt = namedtuple('nested_collection_udt', ['m', 't', 'l', 's']) +nested_collection_udt_nested = namedtuple('nested_collection_udt_nested', ['m', 't', 'l', 's', 'u']) + def setup_module(): use_singledc() @@ -713,3 +719,62 @@ def test_unicode_query_string(self): query = u"SELECT * FROM 
system.schema_columnfamilies WHERE keyspace_name = 'ef\u2052ef' AND columnfamily_name = %s" s.execute(query, (u"fe\u2051fe",)) + + def insert_select_column(self, session, table_name, column_name, value): + insert = session.prepare("INSERT INTO %s (k, %s) VALUES (?, ?)" % (table_name, column_name)) + session.execute(insert, (0, value)) + result = session.execute("SELECT %s FROM %s WHERE k=%%s" % (column_name, table_name), (0,))[0][0] + self.assertEqual(result, value) + + def test_nested_collections(self): + + if self._cass_version < (2, 1, 3): + raise unittest.SkipTest("Support for nested collections was introduced in Cassandra 2.1.3") + + name = self._testMethodName + + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect('test1rf') + s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple + + s.execute(""" + CREATE TYPE %s ( + m frozen>, + t tuple, + l frozen>, + s frozen> + )""" % name) + s.execute(""" + CREATE TYPE %s_nested ( + m frozen>, + t tuple, + l frozen>, + s frozen>, + u frozen<%s> + )""" % (name, name)) + s.execute(""" + CREATE TABLE %s ( + k int PRIMARY KEY, + map_map map>, frozen>>, + map_set map>, frozen>>, + map_list map>, frozen>>, + map_tuple map>, frozen>>, + map_udt map, frozen<%s>>, + )""" + % (name, name, name)) + + validate = partial(self.insert_select_column, s, name) + validate('map_map', OrderedMap([({1: 1, 2: 2}, {3: 3, 4: 4}), ({5: 5, 6: 6}, {7: 7, 8: 8})])) + validate('map_set', OrderedMap([(set((1, 2)), set((3, 4))), (set((5, 6)), set((7, 8)))])) + validate('map_list', OrderedMap([([1, 2], [3, 4]), ([5, 6], [7, 8])])) + validate('map_tuple', OrderedMap([((1, 2), (3,)), ((4, 5), (6,))])) + + value = nested_collection_udt({1: 'v1', 2: 'v2'}, (3, 'v3'), [4, 5, 6, 7], set((8,9,10))) + key = nested_collection_udt_nested(value.m, value.t, value.l, value.s, value) + key2 = nested_collection_udt_nested({3: 'v3'}, value.t, value.l, value.s, value) + validate('map_udt', OrderedMap([(key, value), (key2, value)])) + + s.execute("DROP 
TABLE %s" % (name)) + s.execute("DROP TYPE %s_nested" % (name)) + s.execute("DROP TYPE %s" % (name)) + s.shutdown() From 81811d2aae803c2bccd75b23bcac1313bf0cd07f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 23 Jan 2015 11:19:19 -0600 Subject: [PATCH 1173/3726] quiescent pool state tests tests for in_flight == 0 after various cluster operations related to PYTHON-195 also changes pool.get_state to return a dict of properties (instead of a string) for easier testing --- cassandra/pool.py | 11 +++--- tests/integration/standard/test_cluster.py | 41 ++++++++++++++++++++++ tests/integration/util.py | 24 +++++++++++++ 3 files changed, 71 insertions(+), 5 deletions(-) create mode 100644 tests/integration/util.py diff --git a/cassandra/pool.py b/cassandra/pool.py index 384378a0b4..a99724b06b 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -366,9 +366,10 @@ def get_connections(self): return [c] if c else [] def get_state(self): - have_conn = self._connection is not None - in_flight = self._connection.in_flight if have_conn else 0 - return "shutdown: %s, open: %s, in_flights: %s" % (self.is_shutdown, have_conn, in_flight) + connection = self._connection + open_count = 1 if connection and not (connection.is_closed or connection.is_defunct) else 0 + in_flights = [connection.in_flight] if connection else [] + return {'shutdown': self.is_shutdown, 'open_count': open_count, 'in_flights': in_flights} _MAX_SIMULTANEOUS_CREATION = 1 @@ -703,5 +704,5 @@ def get_connections(self): return self._connections def get_state(self): - in_flights = ", ".join([str(c.in_flight) for c in self._connections]) - return "shutdown: %s, open_count: %d, in_flights: %s" % (self.is_shutdown, self.open_count, in_flights) + in_flights = [c.in_flight for c in self._connections] + return {'shutdown': self.is_shutdown, 'open_count': self.open_count, 'in_flights': in_flights} diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 
7c23d81596..6c8b2be47e 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -31,6 +31,7 @@ from cassandra.query import SimpleStatement, TraceUnavailable from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions +from tests.integration.util import assert_quiescent_pool_state def setup_module(): @@ -495,6 +496,10 @@ def test_idle_heartbeat(self): self.assertIn(cluster.control_connection, holders) self.assertEqual(len(holders), len(cluster.metadata.all_hosts()) + 1) # hosts pools, 1 for cc + cluster._idle_heartbeat.stop() + cluster._idle_heartbeat.join() + assert_quiescent_pool_state(self, cluster) + session.shutdown() @patch('cassandra.cluster.Cluster.idle_heartbeat_interval', new=0.1) @@ -515,3 +520,39 @@ def test_idle_heartbeat_disabled(self): self.assertFalse(any(c.is_idle for c in connections)) session.shutdown() + + def test_pool_management(self): + # Ensure that in_flight and request_ids quiesce after cluster operations + cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=0) # no idle heartbeat here, pool management is tested in test_idle_heartbeat + session = cluster.connect() + session2 = cluster.connect() + + # prepare + p = session.prepare("SELECT * FROM system.local WHERE key=?") + self.assertTrue(session.execute(p, ('local',))) + + # simple + self.assertTrue(session.execute("SELECT * FROM system.local WHERE key='local'")) + + # set keyspace + session.set_keyspace('system') + session.set_keyspace('system_traces') + + # use keyspace + session.execute('USE system') + session.execute('USE system_traces') + + # refresh schema + cluster.refresh_schema() + cluster.refresh_schema(max_schema_agreement_wait=0) + + # submit schema refresh + future = cluster.submit_schema_refresh() + future.result() + + assert_quiescent_pool_state(self, cluster) + + session2.shutdown() + del session2 + assert_quiescent_pool_state(self, cluster) + session.shutdown() diff --git 
a/tests/integration/util.py b/tests/integration/util.py new file mode 100644 index 0000000000..2857171c5f --- /dev/null +++ b/tests/integration/util.py @@ -0,0 +1,24 @@ +# Copyright 2013-2014 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +def assert_quiescent_pool_state(test_case, cluster): + + for session in cluster.sessions: + pool_states = session.get_pool_state().values() + test_case.assertTrue(pool_states) + + for state in pool_states: + test_case.assertFalse(state['shutdown']) + test_case.assertGreater(state['open_count'], 0) + test_case.assertTrue(all((i == 0 for i in state['in_flights']))) From 83a79cbfd4047b98eaea1a74c23cc05df58fdbaf Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 23 Jan 2015 11:57:22 -0600 Subject: [PATCH 1174/3726] Test request id management around authentication related to PYTHON-210 --- tests/integration/standard/test_authentication.py | 14 +++++++++++++- tests/integration/util.py | 13 +++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index fdb338bf40..f248888823 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -14,10 +14,11 @@ import logging -from tests.integration import use_singledc, get_cluster, remove_cluster, PROTOCOL_VERSION from cassandra.cluster import Cluster, NoHostAvailable from cassandra.auth import 
PlainTextAuthProvider, SASLClient, SaslAuthProvider +from tests.integration import use_singledc, get_cluster, remove_cluster, PROTOCOL_VERSION +from tests.integration.util import assert_quiescent_pool_state try: import unittest2 as unittest @@ -65,6 +66,7 @@ def get_authentication_provider(self, username, password): def cluster_as(self, usr, pwd): return Cluster(protocol_version=PROTOCOL_VERSION, + idle_heartbeat_interval=0, auth_provider=self.get_authentication_provider(username=usr, password=pwd)) def test_auth_connect(self): @@ -77,9 +79,11 @@ def test_auth_connect(self): cluster = self.cluster_as(user, passwd) session = cluster.connect() self.assertTrue(session.execute('SELECT release_version FROM system.local')) + assert_quiescent_pool_state(self, cluster) cluster.shutdown() root_session.execute('DROP USER %s', user) + assert_quiescent_pool_state(self, root_session.cluster) root_session.cluster.shutdown() def test_connect_wrong_pwd(self): @@ -88,6 +92,8 @@ def test_connect_wrong_pwd(self): '.*AuthenticationFailed.*Bad credentials.*Username and/or ' 'password are incorrect.*', cluster.connect) + assert_quiescent_pool_state(self, cluster) + cluster.shutdown() def test_connect_wrong_username(self): cluster = self.cluster_as('wrong_user', 'cassandra') @@ -95,6 +101,8 @@ def test_connect_wrong_username(self): '.*AuthenticationFailed.*Bad credentials.*Username and/or ' 'password are incorrect.*', cluster.connect) + assert_quiescent_pool_state(self, cluster) + cluster.shutdown() def test_connect_empty_pwd(self): cluster = self.cluster_as('Cassandra', '') @@ -102,12 +110,16 @@ def test_connect_empty_pwd(self): '.*AuthenticationFailed.*Bad credentials.*Username and/or ' 'password are incorrect.*', cluster.connect) + assert_quiescent_pool_state(self, cluster) + cluster.shutdown() def test_connect_no_auth_provider(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) self.assertRaisesRegexp(NoHostAvailable, '.*AuthenticationFailed.*Remote end requires 
authentication.*', cluster.connect) + assert_quiescent_pool_state(self, cluster) + cluster.shutdown() class SaslAuthenticatorTests(AuthenticationTests): diff --git a/tests/integration/util.py b/tests/integration/util.py index 2857171c5f..08ebac0657 100644 --- a/tests/integration/util.py +++ b/tests/integration/util.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from tests.integration import PROTOCOL_VERSION + def assert_quiescent_pool_state(test_case, cluster): for session in cluster.sessions: @@ -22,3 +24,14 @@ def assert_quiescent_pool_state(test_case, cluster): test_case.assertFalse(state['shutdown']) test_case.assertGreater(state['open_count'], 0) test_case.assertTrue(all((i == 0 for i in state['in_flights']))) + + for holder in cluster.get_connection_holders(): + for connection in holder.get_connections(): + # all ids are unique + req_ids = connection.request_ids + test_case.assertEqual(len(req_ids), len(set(req_ids))) + test_case.assertEqual(connection.highest_request_id, len(req_ids) - 1) + test_case.assertEqual(connection.highest_request_id, max(req_ids)) + if PROTOCOL_VERSION < 3: + test_case.assertEqual(connection.highest_request_id, connection.max_request_id) + From 63af8010bb07cf12370d4b6ea08094358219b66a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 23 Jan 2015 17:40:46 -0600 Subject: [PATCH 1175/3726] Bail on some tests for python3, where static string depend on dict ordering --- setup.py | 8 ++------ tests/integration/standard/test_cluster.py | 10 ---------- tests/integration/standard/test_metadata.py | 16 ++++++++++++---- 3 files changed, 14 insertions(+), 20 deletions(-) diff --git a/setup.py b/setup.py index ec7f6edd74..8536755e00 100644 --- a/setup.py +++ b/setup.py @@ -56,16 +56,11 @@ from nose.commands import nosetests except ImportError: gevent_nosetests = None + eventlet_nosetests = None else: class gevent_nosetests(nosetests): description = "run nosetests 
with gevent monkey patching" - -try: - from nose.commands import nosetests -except ImportError: - eventlet_nosetests = None -else: class eventlet_nosetests(nosetests): description = "run nosetests with eventlet monkey patching" @@ -188,6 +183,7 @@ def build_extension(self, ext): def run_setup(extensions): + kw = {'cmdclass': {'doc': DocCommand}} if gevent_nosetests is not None: kw['cmdclass']['gevent_nosetests'] = gevent_nosetests diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 6c8b2be47e..95e6082b2f 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -488,14 +488,6 @@ def test_idle_heartbeat(self): self.assertIn(cluster.control_connection, holders) self.assertEqual(len(holders), 2 * len(cluster.metadata.all_hosts()) + 1) # 2 sessions' hosts pools, 1 for cc - # exclude removed sessions - session2.shutdown() - del session2 - - holders = cluster.get_connection_holders() - self.assertIn(cluster.control_connection, holders) - self.assertEqual(len(holders), len(cluster.metadata.all_hosts()) + 1) # hosts pools, 1 for cc - cluster._idle_heartbeat.stop() cluster._idle_heartbeat.join() assert_quiescent_pool_state(self, cluster) @@ -553,6 +545,4 @@ def test_pool_management(self): assert_quiescent_pool_state(self, cluster) session2.shutdown() - del session2 - assert_quiescent_pool_state(self, cluster) session.shutdown() diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 344e5491ef..1e46d3c55a 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -11,15 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import six -import difflib try: import unittest2 as unittest except ImportError: import unittest # noqa +import difflib from mock import Mock +import six +import sys from cassandra import AlreadyExists @@ -361,6 +362,9 @@ def test_export_keyspace_schema_udts(self): "Protocol 3.0+ is required for UDT change events, currently testing against %r" % (PROTOCOL_VERSION,)) + if sys.version_info[2:] != (2, 7): + raise unittest.SkipTest('This test compares static strings generated from dict items, which may change orders. Test with 2.7.') + cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() @@ -549,6 +553,9 @@ def test_legacy_tables(self): if get_server_versions()[0] < (2, 1, 0): raise unittest.SkipTest('Test schema output assumes 2.1.0+ options') + if sys.version_info[2:] != (2, 7): + raise unittest.SkipTest('This test compares static strings generated from dict items, which may change orders. Test with 2.7.') + cli_script = """CREATE KEYSPACE legacy WITH placement_strategy = 'SimpleStrategy' AND strategy_options = {replication_factor:1}; @@ -633,7 +640,7 @@ def test_legacy_tables(self): index_type : 0}] and compression_options = {'sstable_compression' : 'org.apache.cassandra.io.compress.LZ4Compressor'};""" - # note: the innerlkey type for legacy.nested_composite_key + # note: the inner key type for legacy.nested_composite_key # (org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType, org.apache.cassandra.db.marshal.UTF8Type)) # is a bit strange, but it replays in CQL with desired results expected_string = """CREATE KEYSPACE legacy WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true; @@ -807,6 +814,7 @@ def test_legacy_tables(self): cluster.shutdown() + class TokenMetadataTest(unittest.TestCase): """ Test of TokenMap creation and other behavior. 
@@ -882,7 +890,7 @@ def test_keyspace_alter(self): self.assertEqual(original_keyspace_meta.durable_writes, True) self.assertEqual(len(original_keyspace_meta.tables), 1) - self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' %name) + self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % name) new_keyspace_meta = self.cluster.metadata.keyspaces[name] self.assertNotEqual(original_keyspace_meta, new_keyspace_meta) self.assertEqual(new_keyspace_meta.durable_writes, False) From 3db158951c609c183adbd3ef21621ff07c103992 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 26 Jan 2015 15:10:26 -0600 Subject: [PATCH 1176/3726] Add note about using protocol v3 with nested collections --- cassandra/util.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cassandra/util.py b/cassandra/util.py index 6bc572e84e..cf5e0f4a2b 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -576,7 +576,7 @@ class OrderedMap(Mapping): ['value', 'value2'] These constructs are needed to support nested collections in Cassandra 2.1.3+, - where frozen collections can be specified as parameters to others:: + where frozen collections can be specified as parameters to others\*:: CREATE TABLE example ( ... @@ -587,6 +587,10 @@ class OrderedMap(Mapping): This class dervies from the (immutable) Mapping API. Objects in these maps are not intended be modified. + \* Note: Because of the way Cassandra encodes nested types, when using the + driver with nested collections, :attr:`~.Cluster.protocol_version` must be 3 + or higher. + ''' def __init__(self, *args, **kwargs): if len(args) > 1: From 98e0e3259e108c032c2f3b7b48b06a84eaaa7e9e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 26 Jan 2015 15:21:05 -0600 Subject: [PATCH 1177/3726] Update saslauth added ver. Add SaslAuth to API docs. 
--- cassandra/auth.py | 11 +++++++---- docs/api/cassandra/auth.rst | 6 ++++++ 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/cassandra/auth.py b/cassandra/auth.py index 77c5bcf985..fc13a82132 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -130,7 +130,9 @@ def evaluate_challenge(self, challenge): class SaslAuthProvider(AuthProvider): """ - An :class:`~.AuthProvider` for Kerberos authenticators + An :class:`~.AuthProvider` supporting general SASL auth mechanisms + + Suitable for GSSAPI or other SASL mechanisms Example usage:: @@ -144,7 +146,7 @@ class SaslAuthProvider(AuthProvider): auth_provider = SaslAuthProvider(**sasl_kwargs) cluster = Cluster(auth_provider=auth_provider) - .. versionadded:: 2.1.3-post + .. versionadded:: 2.1.4 """ def __init__(self, **sasl_kwargs): @@ -157,9 +159,10 @@ def new_authenticator(self, host): class SaslAuthenticator(Authenticator): """ - An :class:`~.Authenticator` that works with DSE's KerberosAuthenticator. + A pass-through :class:`~.Authenticator` using the third party package + 'pure-sasl' for authentication - .. versionadded:: 2.1.3-post + .. versionadded:: 2.1.4 """ def __init__(self, host, service, mechanism='GSSAPI', **sasl_kwargs): diff --git a/docs/api/cassandra/auth.rst b/docs/api/cassandra/auth.rst index 0ee6e53911..58c964cf89 100644 --- a/docs/api/cassandra/auth.rst +++ b/docs/api/cassandra/auth.rst @@ -14,3 +14,9 @@ .. autoclass:: PlainTextAuthenticator :members: + +.. autoclass:: SaslAuthProvider + :members: + +.. 
autoclass:: SaslAuthenticator + :members: From 1f1a66419efc70ec13a150636c72fc3f6014bca7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 26 Jan 2015 15:37:45 -0600 Subject: [PATCH 1178/3726] Add cluster idle_heartbeat_interval and refresh_schema to docs --- cassandra/cluster.py | 6 +++++- docs/api/cassandra/cluster.rst | 4 ++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 2c5e45c34b..b0d7dcc057 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1077,10 +1077,14 @@ def _ensure_core_connections(self): def refresh_schema(self, keyspace=None, table=None, usertype=None, max_schema_agreement_wait=None): """ Synchronously refresh the schema metadata. - By default timeout for this operation is governed by :attr:`~.Cluster.max_schema_agreement_wait` + + By default, the timeout for this operation is governed by :attr:`~.Cluster.max_schema_agreement_wait` and :attr:`~.Cluster.control_connection_timeout`. + Passing max_schema_agreement_wait here overrides :attr:`~.Cluster.max_schema_agreement_wait`. + Setting max_schema_agreement_wait <= 0 will bypass schema agreement and refresh schema immediately. + An Exception is raised if schema refresh fails for any reason. """ if not self.control_connection.refresh_schema(keyspace, table, usertype, max_schema_agreement_wait): diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 2d6a5342b9..f0a9a312dc 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -39,6 +39,8 @@ .. autoattribute:: control_connection_timeout + .. autoattribute:: idle_heartbeat_interval + .. automethod:: connect .. automethod:: shutdown @@ -57,6 +59,8 @@ .. automethod:: set_max_connections_per_host + .. automethod:: refresh_schema + .. autoclass:: Session () .. 
autoattribute:: default_timeout From 6d8eb3ccc43ea1347fd635cc99dbbc4f95601d69 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 26 Jan 2015 15:54:01 -0600 Subject: [PATCH 1179/3726] Mention SaslAuthProvider in doc security section. --- docs/security.rst | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/docs/security.rst b/docs/security.rst index c87c5de8d1..9f7af68b4d 100644 --- a/docs/security.rst +++ b/docs/security.rst @@ -34,9 +34,11 @@ to be explicit. Custom Authenticators ^^^^^^^^^^^^^^^^^^^^^ If you're using something other than Cassandra's ``PasswordAuthenticator``, -you may need to create your own subclasses of :class:`~.AuthProvider` and -:class:`~.Authenticator`. You can use :class:`~.PlainTextAuthProvider` -and :class:`~.PlainTextAuthenticator` as example implementations. +:class:`~.SaslAuthProvider` is provided for generic SASL authentication mechanisms, +utilizing the ``pure-sasl`` package. +If these do not suit your needs, you may need to create your own subclasses of +:class:`~.AuthProvider` and :class:`~.Authenticator`. You can use the Sasl classes +as example implementations. 
Protocol v1 Authentication ^^^^^^^^^^^^^^^^^^^^^^^^^^ From 817c49988a5fadf0e84a12583b619c9f021ca19f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 26 Jan 2015 16:02:29 -0600 Subject: [PATCH 1180/3726] Changelog update for 2.1.4 --- CHANGELOG.rst | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index aaa67c8e5e..47b9d29cc1 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,14 +1,22 @@ -2.1.3-post -========== +2.1.4 +===== Features -------- * SaslAuthenticator for Kerberos support (PYTHON-109) +* Heartbeat for network device keepalive and detecting failures on idle connections (PYTHON-197) +* Support nested, frozen collections for Cassandra 2.1.3+ (PYTHON-186) +* Schema agreement wait bypass config, new call for synchronous schema refresh (PYTHON-205) +* Add eventlet connection support (PYTHON-194) Bug Fixes --------- * Schema meta fix for complex thrift tables (PYTHON-191) * Support for 'unknown' replica placement strategies in schema meta (PYTHON-192) +* Resolve stream ID leak on set_keyspace (PYTHON-195) +* Remove implicit timestamp scaling on serialization of numeric timestamps (PYTHON-204) +* Resolve stream id collision when using SASL auth (PYTHON-210) +* Correct unhexlify usage for user defined type meta in Python3 (PYTHON-208) 2.1.3 ===== From e2c93c5376ed7193da57e6971d9e85df067ce193 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 26 Jan 2015 22:10:50 +0000 Subject: [PATCH 1181/3726] Version update for release. 
--- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 93c53dcce4..f68323e6f7 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (2, 1, 3, 'post') +__version_info__ = (2, 1, 4) __version__ = '.'.join(map(str, __version_info__)) From 7b503df63116ef335a2cddeffc375e5593a62921 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 26 Jan 2015 22:21:27 +0000 Subject: [PATCH 1182/3726] Post-release version update --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index f68323e6f7..96f085767c 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (2, 1, 4) +__version_info__ = (2, 1, 4, 'post') __version__ = '.'.join(map(str, __version_info__)) From 18c33ff30d124fdc4f372ab8c4c0d31f5de9de3d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 28 Jan 2015 10:45:49 -0600 Subject: [PATCH 1183/3726] Prevent node refresh on node discovery during node refresh. 
--- cassandra/cluster.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index b0d7dcc057..87514d35e6 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -936,7 +936,7 @@ def on_down(self, host, is_host_addition, expect_host_to_be_down=False): self._start_reconnector(host, is_host_addition) - def on_add(self, host): + def on_add(self, host, refresh_nodes=True): if self.is_shutdown: return @@ -948,7 +948,7 @@ def on_add(self, host): log.debug("Done preparing queries for new host %r", host) self.load_balancing_policy.on_add(host) - self.control_connection.on_add(host) + self.control_connection.on_add(host, refresh_nodes) if distance == HostDistance.IGNORED: log.debug("Not adding connection pool for new host %r because the " @@ -1024,7 +1024,7 @@ def signal_connection_failure(self, host, connection_exc, is_host_addition, expe self.on_down(host, is_host_addition, expect_host_to_be_down) return is_down - def add_host(self, address, datacenter=None, rack=None, signal=True): + def add_host(self, address, datacenter=None, rack=None, signal=True, refresh_nodes=True): """ Called when adding initial contact points and when the control connection subsequently discovers a new node. Intended for internal @@ -1033,7 +1033,7 @@ def add_host(self, address, datacenter=None, rack=None, signal=True): new_host = self.metadata.add_host(address, datacenter, rack) if new_host and signal: log.info("New Cassandra host %r discovered", new_host) - self.on_add(new_host) + self.on_add(new_host, refresh_nodes) return new_host @@ -1080,11 +1080,11 @@ def refresh_schema(self, keyspace=None, table=None, usertype=None, max_schema_ag By default, the timeout for this operation is governed by :attr:`~.Cluster.max_schema_agreement_wait` and :attr:`~.Cluster.control_connection_timeout`. - + Passing max_schema_agreement_wait here overrides :attr:`~.Cluster.max_schema_agreement_wait`. 
- + Setting max_schema_agreement_wait <= 0 will bypass schema agreement and refresh schema immediately. - + An Exception is raised if schema refresh fails for any reason. """ if not self.control_connection.refresh_schema(keyspace, table, usertype, max_schema_agreement_wait): @@ -2132,7 +2132,7 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, rack = row.get("rack") if host is None: log.debug("[control connection] Found new host to connect to: %s", addr) - host = self._cluster.add_host(addr, datacenter, rack, signal=True) + host = self._cluster.add_host(addr, datacenter, rack, signal=True, refresh_nodes=False) should_rebuild_token_map = True else: should_rebuild_token_map |= self._update_location_info(host, datacenter, rack) @@ -2320,8 +2320,9 @@ def on_down(self, host): # this will result in a task being submitted to the executor to reconnect self.reconnect() - def on_add(self, host): - self.refresh_node_list_and_token_map(force_token_rebuild=True) + def on_add(self, host, refresh_nodes=True): + if refresh_nodes: + self.refresh_node_list_and_token_map(force_token_rebuild=True) def on_remove(self, host): self.refresh_node_list_and_token_map(force_token_rebuild=True) From ae20e9977b2a84788293794e6b7a3637f9797769 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 28 Jan 2015 13:36:29 -0600 Subject: [PATCH 1184/3726] Randomize meta refresh on events. Add config options for refresh window. Changes to mitigate 'thundering herd' problem on events in deployments with large C*->Client fanout. 
--- cassandra/cluster.py | 109 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 88 insertions(+), 21 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 87514d35e6..4ca0569ee6 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -22,6 +22,7 @@ from collections import defaultdict from concurrent.futures import ThreadPoolExecutor import logging +from random import random import socket import sys import time @@ -391,6 +392,40 @@ def auth_provider(self, value): Setting to zero disables heartbeats. """ + schema_event_refresh_window = 2 + """ + Window, in seconds, within which a schema component will be refreshed after + receiving a schema_change event. + + The driver delays a random amount of time in the range [0.0, window) + before executing the refresh. This serves two purposes: + + 1.) Spread the refresh for deployments with large fanout from C* to client tier, + preventing a 'thundering herd' problem with many clients refreshing simultaneously. + + 2.) Remove redundant refreshes. Redundant events arriving within the delay period + are discarded, and only one refresh is executed. + + Setting this to zero will execute refreshes immediately. + + Setting this negative will disable schema refreshes in response to push events + (refreshes will still occur in response to schema change responses to DDL statements + executed by Sessions of this Cluster). + """ + + topology_event_refresh_window = 10 + """ + Window, in seconds, within which the node and token list will be refreshed after + receiving a topology_change event. + + Setting this to zero will execute refreshes immediately. + + Setting this negative will disable node refreshes in response to push events + (refreshes will still occur in response to new nodes observed on "UP" events). 
+ + See :attr:`.schema_event_refresh_window` for discussion of rationale + """ + sessions = None control_connection = None scheduler = None @@ -427,7 +462,9 @@ def __init__(self, executor_threads=2, max_schema_agreement_wait=10, control_connection_timeout=2.0, - idle_heartbeat_interval=30): + idle_heartbeat_interval=30, + schema_event_refresh_window=2, + topology_event_refresh_window=10): """ Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. @@ -478,6 +515,8 @@ def __init__(self, self.max_schema_agreement_wait = max_schema_agreement_wait self.control_connection_timeout = control_connection_timeout self.idle_heartbeat_interval = idle_heartbeat_interval + self.schema_event_refresh_window = schema_event_refresh_window + self.topology_event_refresh_window = topology_event_refresh_window self._listeners = set() self._listener_lock = Lock() @@ -522,7 +561,8 @@ def __init__(self, self.metrics = Metrics(weakref.proxy(self)) self.control_connection = ControlConnection( - self, self.control_connection_timeout) + self, self.control_connection_timeout, + self.schema_event_refresh_window, self.topology_event_refresh_window) def register_user_type(self, keyspace, user_type, klass): """ @@ -1770,16 +1810,24 @@ class ControlConnection(object): _timeout = None _protocol_version = None + _schema_event_refresh_window = None + _topology_event_refresh_window = None + # for testing purposes _time = time - def __init__(self, cluster, timeout): + def __init__(self, cluster, timeout, + schema_event_refresh_window, + topology_event_refresh_window): # use a weak reference to allow the Cluster instance to be GC'ed (and # shutdown) since implementing __del__ disables the cycle detector self._cluster = weakref.proxy(cluster) self._connection = None self._timeout = timeout + self._schema_event_refresh_window = schema_event_refresh_window + self._topology_event_refresh_window = topology_event_refresh_window + self._lock = RLock() self._schema_agreement_lock 
= Lock() @@ -2167,25 +2215,25 @@ def _update_location_info(self, host, datacenter, rack): def _handle_topology_change(self, event): change_type = event["change_type"] addr, port = event["address"] - if change_type == "NEW_NODE": - self._cluster.scheduler.schedule(10, self.refresh_node_list_and_token_map) + if change_type == "NEW_NODE" or change_type == "MOVED_NODE": + if self._topology_event_refresh_window >= 0: + delay = random() * self._topology_event_refresh_window + self._cluster.scheduler.schedule_unique(delay, self.refresh_node_list_and_token_map) elif change_type == "REMOVED_NODE": host = self._cluster.metadata.get_host(addr) - self._cluster.scheduler.schedule(0, self._cluster.remove_host, host) - elif change_type == "MOVED_NODE": - self._cluster.scheduler.schedule(1, self.refresh_node_list_and_token_map) + self._cluster.scheduler.schedule_unique(0, self._cluster.remove_host, host) def _handle_status_change(self, event): change_type = event["change_type"] addr, port = event["address"] host = self._cluster.metadata.get_host(addr) if change_type == "UP": + delay = 1 + random() * 0.5 # randomness to avoid thundering herd problem on events if host is None: # this is the first time we've seen the node - self._cluster.scheduler.schedule(2, self.refresh_node_list_and_token_map) + self._cluster.scheduler.schedule_unique(delay, self.refresh_node_list_and_token_map) else: - # this will be run by the scheduler - self._cluster.scheduler.schedule(2, self._cluster.on_up, host) + self._cluster.scheduler.schedule_unique(delay, self._cluster.on_up, host) elif change_type == "DOWN": # Note that there is a slight risk we can receive the event late and thus # mark the host down even though we already had reconnected successfully. 
@@ -2196,10 +2244,14 @@ def _handle_status_change(self, event): self._cluster.on_down(host, is_host_addition=False) def _handle_schema_change(self, event): + if self._schema_event_refresh_window < 0: + return + keyspace = event.get('keyspace') table = event.get('table') usertype = event.get('type') - self._submit(self.refresh_schema, keyspace, table, usertype) + delay = random() * self._schema_event_refresh_window + self._cluster.scheduler.schedule_unique(delay, self.refresh_schema, keyspace, table, usertype) def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wait_time=None): @@ -2348,12 +2400,14 @@ def _stop_scheduler(scheduler, thread): class _Scheduler(object): - _scheduled = None + _queue = None + _scheduled_tasks = None _executor = None is_shutdown = False def __init__(self, executor): - self._scheduled = Queue.PriorityQueue() + self._queue = Queue.PriorityQueue() + self._scheduled_tasks = set() self._executor = executor t = Thread(target=self.run, name="Task Scheduler") @@ -2371,12 +2425,24 @@ def shutdown(self): # this can happen on interpreter shutdown pass self.is_shutdown = True - self._scheduled.put_nowait((0, None)) + self._queue.put_nowait((0, None)) + + def schedule(self, delay, fn, *args): + self._insert_task(delay, (fn, args)) + + def schedule_unique(self, delay, fn, *args): + task = (fn, args) + if task not in self._scheduled_tasks: + self._insert_task(delay, task) + else: + log.debug("Ignoring schedule_unique for already-scheduled task: %r", fn) + - def schedule(self, delay, fn, *args, **kwargs): + def _insert_task(self, delay, task): if not self.is_shutdown: run_at = time.time() + delay - self._scheduled.put_nowait((run_at, (fn, args, kwargs))) + self._scheduled_tasks.add(task) + self._queue.put_nowait((run_at, task)) else: log.debug("Ignoring scheduled function after shutdown: %r", fn) @@ -2387,16 +2453,17 @@ def run(self): try: while True: - run_at, task = self._scheduled.get(block=True, timeout=None) + run_at, task = 
self._queue.get(block=True, timeout=None) if self.is_shutdown: log.debug("Not executing scheduled task due to Scheduler shutdown") return if run_at <= time.time(): - fn, args, kwargs = task - future = self._executor.submit(fn, *args, **kwargs) + self._scheduled_tasks.remove(task) + fn, args = task + future = self._executor.submit(fn, *args) future.add_done_callback(self._log_if_failed) else: - self._scheduled.put_nowait((run_at, task)) + self._queue.put_nowait((run_at, task)) break except Queue.Empty: pass From 39fbdfb3848c7ee57a5579b8ce04b61ff6f2f5cf Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 28 Jan 2015 14:38:35 -0600 Subject: [PATCH 1185/3726] Add API for disabling all meta refresh. Also a synchronous node refresh call. --- cassandra/cluster.py | 48 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 42 insertions(+), 6 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 4ca0569ee6..acf30efb01 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1139,6 +1139,24 @@ def submit_schema_refresh(self, keyspace=None, table=None, usertype=None): return self.executor.submit( self.control_connection.refresh_schema, keyspace, table, usertype) + def refresh_nodes(self): + """ + Synchronously refresh the node list and token metadata + + An Exception is raised if node refresh fails for any reason. + """ + if not self.control_connection.refresh_node_list_and_token_map(): + raise Exception("Node list was not refreshed. See log for details.") + + def set_meta_refresh_enabled(self, enabled): + """ + Sets a flag to enable (True) or disable (False) all metadata refresh queries. + This applies to both schema and node topology. + + Disabling this is useful to minimize refreshes during multiple changes. 
+ """ + self.control_connection.set_meta_refresh_enabled(bool(enabled)) + def _prepare_all_queries(self, host): if not self._prepared_statements: return @@ -1813,6 +1831,8 @@ class ControlConnection(object): _schema_event_refresh_window = None _topology_event_refresh_window = None + _meta_refresh_enabled = True + # for testing purposes _time = time @@ -1985,6 +2005,10 @@ def shutdown(self): def refresh_schema(self, keyspace=None, table=None, usertype=None, schema_agreement_wait=None): + if not self._meta_refresh_enabled: + log.debug("[control connection] Skipping schema refresh because meta refresh is disabled") + return False + try: if self._connection: return self._refresh_schema(self._connection, keyspace, table, usertype, @@ -2112,14 +2136,20 @@ def _handle_results(success, result): return True def refresh_node_list_and_token_map(self, force_token_rebuild=False): + if not self._meta_refresh_enabled: + log.debug("[control connection] Skipping node list refresh because meta refresh is disabled") + return False + try: if self._connection: self._refresh_node_list_and_token_map(self._connection, force_token_rebuild=force_token_rebuild) + return True except ReferenceError: pass # our weak reference to the Cluster is no good except Exception: log.debug("[control connection] Error refreshing node list and token map", exc_info=True) self._signal_error() + return False def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, force_token_rebuild=False): @@ -2387,6 +2417,9 @@ def return_connection(self, connection): if connection is self._connection and (connection.is_defunct or connection.is_closed): self.reconnect() + def set_meta_refresh_enabled(self, enabled): + self._meta_refresh_enabled = enabled + def _stop_scheduler(scheduler, thread): try: @@ -2435,8 +2468,7 @@ def schedule_unique(self, delay, fn, *args): if task not in self._scheduled_tasks: self._insert_task(delay, task) else: - log.debug("Ignoring schedule_unique for already-scheduled 
task: %r", fn) - + log.debug("Ignoring schedule_unique for already-scheduled task: %r", task) def _insert_task(self, delay, task): if not self.is_shutdown: @@ -2444,7 +2476,7 @@ def _insert_task(self, delay, task): self._scheduled_tasks.add(task) self._queue.put_nowait((run_at, task)) else: - log.debug("Ignoring scheduled function after shutdown: %r", fn) + log.debug("Ignoring scheduled task after shutdown: %r", task) def run(self): while True: @@ -2480,9 +2512,13 @@ def _log_if_failed(self, future): def refresh_schema_and_set_result(keyspace, table, usertype, control_conn, response_future): try: - log.debug("Refreshing schema in response to schema change. Keyspace: %s; Table: %s, Type: %s", - keyspace, table, usertype) - control_conn._refresh_schema(response_future._connection, keyspace, table, usertype) + if control_conn._meta_refresh_enabled: + log.debug("Refreshing schema in response to schema change. Keyspace: %s; Table: %s, Type: %s", + keyspace, table, usertype) + control_conn._refresh_schema(response_future._connection, keyspace, table, usertype) + else: + log.debug("Skipping schema refresh in response to schema change because meta refresh is disabled; " + "Keyspace: %s; Table: %s, Type: %s", keyspace, table, usertype) except Exception: log.exception("Exception refreshing schema in response to schema change:") response_future.session.submit( From 69830d3bf534b1336326768b14664c1c72300be7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 28 Jan 2015 15:56:24 -0600 Subject: [PATCH 1186/3726] Add new api attributes and functions to docs. --- cassandra/cluster.py | 3 +++ docs/api/cassandra/cluster.rst | 9 +++++++++ 2 files changed, 12 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index acf30efb01..b639af6992 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1154,6 +1154,9 @@ def set_meta_refresh_enabled(self, enabled): This applies to both schema and node topology. 
Disabling this is useful to minimize refreshes during multiple changes. + + Meta refresh must be enabled for the driver to become aware of any cluster + topology changes or schema updates. """ self.control_connection.set_meta_refresh_enabled(bool(enabled)) diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index f0a9a312dc..b1b1aebb0a 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -41,6 +41,10 @@ .. autoattribute:: idle_heartbeat_interval + .. autoattribute:: schema_event_refresh_window + + .. autoattribute:: topology_event_refresh_window + .. automethod:: connect .. automethod:: shutdown @@ -61,6 +65,11 @@ .. automethod:: refresh_schema + .. automethod:: refresh_nodes + + .. automethod:: set_meta_refresh_enabled + + .. autoclass:: Session () .. autoattribute:: default_timeout From 5a9174f309322cbbb38cbf93f77554053669f2bc Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 29 Jan 2015 08:51:40 -0600 Subject: [PATCH 1187/3726] Update unit test --- tests/unit/test_control_connection.py | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index 12d09781e5..de95dbf475 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -73,7 +73,7 @@ def __init__(self): self.scheduler = Mock(spec=_Scheduler) self.executor = Mock(spec=ThreadPoolExecutor) - def add_host(self, address, datacenter, rack, signal=False): + def add_host(self, address, datacenter, rack, signal=False, refresh_nodes=True): host = Host(address, SimpleConvictionPolicy, datacenter, rack) self.added_hosts.append(host) return host @@ -131,7 +131,7 @@ def setUp(self): self.connection = MockConnection() self.time = FakeTime() - self.control_connection = ControlConnection(self.cluster, timeout=1) + self.control_connection = ControlConnection(self.cluster, 1, 0, 0) 
self.control_connection._connection = self.connection self.control_connection._time = self.time @@ -345,39 +345,44 @@ def test_handle_topology_change(self): 'change_type': 'NEW_NODE', 'address': ('1.2.3.4', 9000) } + self.cluster.scheduler.reset_mock() self.control_connection._handle_topology_change(event) - self.cluster.scheduler.schedule.assert_called_with(ANY, self.control_connection.refresh_node_list_and_token_map) + self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_node_list_and_token_map) event = { 'change_type': 'REMOVED_NODE', 'address': ('1.2.3.4', 9000) } + self.cluster.scheduler.reset_mock() self.control_connection._handle_topology_change(event) - self.cluster.scheduler.schedule.assert_called_with(ANY, self.cluster.remove_host, None) + self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.cluster.remove_host, None) event = { 'change_type': 'MOVED_NODE', 'address': ('1.2.3.4', 9000) } + self.cluster.scheduler.reset_mock() self.control_connection._handle_topology_change(event) - self.cluster.scheduler.schedule.assert_called_with(ANY, self.control_connection.refresh_node_list_and_token_map) + self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_node_list_and_token_map) def test_handle_status_change(self): event = { 'change_type': 'UP', 'address': ('1.2.3.4', 9000) } + self.cluster.scheduler.reset_mock() self.control_connection._handle_status_change(event) - self.cluster.scheduler.schedule.assert_called_with(ANY, self.control_connection.refresh_node_list_and_token_map) + self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_node_list_and_token_map) # do the same with a known Host event = { 'change_type': 'UP', 'address': ('192.168.1.0', 9000) } + self.cluster.scheduler.reset_mock() self.control_connection._handle_status_change(event) host = self.cluster.metadata.hosts['192.168.1.0'] - 
self.cluster.scheduler.schedule.assert_called_with(ANY, self.cluster.on_up, host) + self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.cluster.on_up, host) self.cluster.scheduler.schedule.reset_mock() event = { @@ -404,9 +409,11 @@ def test_handle_schema_change(self): 'keyspace': 'ks1', 'table': 'table1' } + self.cluster.scheduler.reset_mock() self.control_connection._handle_schema_change(event) - self.cluster.executor.submit.assert_called_with(self.control_connection.refresh_schema, 'ks1', 'table1', None) + self.cluster.scheduler.schedule_unique.assert_called_once_with(0.0, self.control_connection.refresh_schema, 'ks1', 'table1', None) + self.cluster.scheduler.reset_mock() event['table'] = None self.control_connection._handle_schema_change(event) - self.cluster.executor.submit.assert_called_with(self.control_connection.refresh_schema, 'ks1', None, None) + self.cluster.scheduler.schedule_unique.assert_called_once_with(0.0, self.control_connection.refresh_schema, 'ks1', None, None) From 8cdc36fb68c21590090fd87ba709ec68cf67c435 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 29 Jan 2015 10:27:08 -0600 Subject: [PATCH 1188/3726] Return connection to owner if closed during heartbeat. This makes the driver notice nodes down even if it is the control connection and there is no request traffic hitting the pool. 
--- cassandra/connection.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index a7d98ecb3e..9a41a0d61c 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -802,15 +802,19 @@ def run(self): try: for connections, owner in [(o.get_connections(), o) for o in self._get_connection_holders()]: for connection in connections: - if not (connection.is_defunct or connection.is_closed) and connection.is_idle: - try: - futures.append(HeartbeatFuture(connection, owner)) - except Exception: - log.warning("Failed sending heartbeat message on connection (%s) to %s", - id(connection), connection.host, exc_info=True) - failed_connections.append((connection, owner)) + if not (connection.is_defunct or connection.is_closed): + if connection.is_idle: + try: + futures.append(HeartbeatFuture(connection, owner)) + except Exception: + log.warning("Failed sending heartbeat message on connection (%s) to %s", + id(connection), connection.host, exc_info=True) + failed_connections.append((connection, owner)) + else: + connection.reset_idle() else: - connection.reset_idle() + # make sure the owner sees this defunt/closed connection + owner.return_connection(connection) for f in futures: connection = f.connection From 3a59deda4e8f19aa5f369755b5c01da99b05b254 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 29 Jan 2015 11:20:17 -0600 Subject: [PATCH 1189/3726] Sleep for auth test, allowing C* to setup internally --- tests/integration/standard/test_authentication.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index f248888823..1534947a28 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -13,6 +13,7 @@ # limitations under the License. 
import logging +import time from cassandra.cluster import Cluster, NoHostAvailable from cassandra.auth import PlainTextAuthProvider, SASLClient, SaslAuthProvider @@ -36,7 +37,10 @@ def setup_module(): 'authorizer': 'CassandraAuthorizer'} ccm_cluster.set_configuration_options(config_options) log.debug("Starting ccm test cluster with %s", config_options) - ccm_cluster.start(wait_for_binary_proto=True) + ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) + # there seems to be some race, with some versions of C* taking longer to + # get the auth (and default user) setup. Sleep here to give it a chance + time.sleep(2) def teardown_module(): From 453e3738d70febe66b5986997e2ffa25feee5bb9 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Fri, 30 Jan 2015 13:52:25 -0800 Subject: [PATCH 1190/3726] enable collections inside UDTs via freezing Tuple --- tests/integration/standard/test_udts.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 87e76b3559..9258513047 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -580,7 +580,6 @@ def test_nonprimitive_datatypes(self): """ Test for inserting various types of DATA_TYPE_NON_PRIMITIVE into UDT's """ - raise unittest.SkipTest("Collections are not allowed in UDTs") c = Cluster(protocol_version=PROTOCOL_VERSION) s = c.connect() @@ -599,6 +598,9 @@ def test_nonprimitive_datatypes(self): if nonprim_datatype == "map": type_string = "{0}_{1} {2}<{3}, {3}>".format(chr(start_index + i), chr(start_index + j), nonprim_datatype, datatype) + elif nonprim_datatype == "tuple": + type_string = "{0}_{1} frozen<{2}<{3}>>".format(chr(start_index + i), chr(start_index + j), + nonprim_datatype, datatype) else: type_string = "{0}_{1} {2}<{3}>".format(chr(start_index + i), chr(start_index + j), nonprim_datatype, datatype) From 5acb8cd5bd3729fbb1dd92483ca2c65ea624d5cb Mon 
Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 30 Jan 2015 17:58:07 -0600 Subject: [PATCH 1191/3726] Improved handling of Time type. still needs test, doc update --- cassandra/cqltypes.py | 49 ++++------------------ cassandra/encoder.py | 3 +- cassandra/util.py | 96 ++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 104 insertions(+), 44 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index a9ba4aac90..9ce04b004b 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -48,15 +48,13 @@ int32_pack, int32_unpack, int64_pack, int64_unpack, float_pack, float_unpack, double_pack, double_unpack, varint_pack, varint_unpack) -from cassandra.util import OrderedMap, sortedset +from cassandra.util import OrderedMap, sortedset, Time apache_cassandra_type_prefix = 'org.apache.cassandra.db.marshal.' if six.PY3: _number_types = frozenset((int, float)) - _time_types = frozenset((int,)) - _date_types = frozenset((int,)) long = int def _name_from_hex_string(encoded_name): @@ -64,8 +62,6 @@ def _name_from_hex_string(encoded_name): return bin_str.decode('ascii') else: _number_types = frozenset((int, long, float)) - _time_types = frozenset((int, long)) - _date_types = frozenset((int, long)) _name_from_hex_string = unhexlify @@ -633,7 +629,7 @@ class SimpleDateType(_CassandraType): def validate(cls, val): if isinstance(val, six.string_types): val = cls.interpret_simpledate_string(val) - elif (not isinstance(val, datetime.date)) and (type(val) not in _date_types): + elif (not isinstance(val, datetime.date)) and not isinstance(val, six.integer_types): raise TypeError('SimpleDateType arg must be a datetime.date, unsigned integer, or string in the format YYYY-MM-DD') return val @@ -662,55 +658,24 @@ def deserialize(byts, protocol_version): class TimeType(_CassandraType): typename = 'time' - ONE_MICRO = 1000 - ONE_MILLI = 1000 * ONE_MICRO - ONE_SECOND = 1000 * ONE_MILLI - ONE_MINUTE = 60 * ONE_SECOND - ONE_HOUR = 60 * ONE_MINUTE @classmethod 
def validate(cls, val): - if isinstance(val, six.string_types): - val = cls.interpret_timestring(val) - elif (not isinstance(val, datetime.time)) and (type(val) not in _time_types): - raise TypeError('TimeType arguments must be a string or whole number') + if not isinstance(val, Time): + val = Time(val) return val - @staticmethod - def interpret_timestring(val): - try: - nano = 0 - parts = val.split('.') - base_time = time.strptime(parts[0], "%H:%M:%S") - nano = (base_time.tm_hour * TimeType.ONE_HOUR + - base_time.tm_min * TimeType.ONE_MINUTE + - base_time.tm_sec * TimeType.ONE_SECOND) - - if len(parts) > 1: - # right pad to 9 digits - nano_time_str = parts[1] + "0" * (9 - len(parts[1])) - nano += int(nano_time_str) - - return nano - except ValueError: - raise ValueError("can't interpret %r as a time" % (val,)) - @staticmethod def serialize(val, protocol_version): - # Values of the @time@ type are encoded as 64-bit signed integers - # representing the number of nanoseconds since midnight. 
try: - nano = (val.hour * TimeType.ONE_HOUR + - val.minute * TimeType.ONE_MINUTE + - val.second * TimeType.ONE_SECOND + - val.microsecond * TimeType.ONE_MICRO) + nano = val.nanosecond_time except AttributeError: - nano = val + nano = Time(val).nanosecond_time return int64_pack(nano) @staticmethod def deserialize(byts, protocol_version): - return int64_unpack(byts) + return Time(int64_unpack(byts)) class UTF8Type(_CassandraType): diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 516945b3f2..739c0f64c3 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -28,7 +28,7 @@ from uuid import UUID import six -from cassandra.util import OrderedDict, OrderedMap, sortedset +from cassandra.util import OrderedDict, OrderedMap, sortedset, Time if six.PY3: long = int @@ -75,6 +75,7 @@ def __init__(self): datetime.datetime: self.cql_encode_datetime, datetime.date: self.cql_encode_date, datetime.time: self.cql_encode_time, + Time: self.cql_encode_time, dict: self.cql_encode_map_collection, OrderedDict: self.cql_encode_map_collection, OrderedMap: self.cql_encode_map_collection, diff --git a/cassandra/util.py b/cassandra/util.py index cf5e0f4a2b..09eba8254c 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -563,7 +563,7 @@ def _intersect(self, other): class OrderedMap(Mapping): ''' - An ordered map that accepts non-hashable types for keys. It also maintains the + An ordered map that accepts non-hashable types for keys. It also maintains the insertion order of items, behaving as OrderedDict in that regard. These maps are constructed and read just as normal mapping types, exept that they may contain arbitrary collections and other non-hashable items as keys:: @@ -653,3 +653,97 @@ def __str__(self): @staticmethod def _serialize_key(key): return cPickle.dumps(key) + + +import datetime +import time + +if six.PY3: + long = int + + +class Time(object): + ''' + Idealized time, independent of day. 
+ + Up to nanosecond resolution + ''' + + MICRO = 1000 + MILLI = 1000 * MICRO + SECOND = 1000 * MILLI + MINUTE = 60 * SECOND + HOUR = 60 * MINUTE + DAY = 24 * HOUR + + nanosecond_time = 0 + + def __init__(self, value): + if isinstance(value, six.integer_types): + self._from_timestamp(value) + elif isinstance(value, datetime.time): + self._from_time(value) + elif isinstance(value, six.string_types): + self._from_timestring(value) + else: + raise TypeError('Time arguments must be a whole number, datetime.time, or string') + + @property + def hour(self): + return self.nanosecond_time // Time.HOUR + + @property + def minute(self): + minutes = self.nanosecond_time // Time.MINUTE + return minutes % 60 + + @property + def second(self): + seconds = self.nanosecond_time // Time.SECOND + return seconds % 60 + + @property + def nanosecond(self): + return self.nanosecond_time % Time.SECOND + + def _from_timestamp(self, t): + if t >= Time.DAY: + raise ValueError("value must be less than number of nanoseconds in a day (%d)" % Time.DAY) + self.nanosecond_time = t + + def _from_timestring(self, s): + try: + parts = s.split('.') + base_time = time.strptime(parts[0], "%H:%M:%S") + self.nanosecond_time = (base_time.tm_hour * Time.HOUR + + base_time.tm_min * Time.MINUTE + + base_time.tm_sec * Time.SECOND) + + if len(parts) > 1: + # right pad to 9 digits + nano_time_str = parts[1] + "0" * (9 - len(parts[1])) + self.nanosecond_time += int(nano_time_str) + + except ValueError: + raise ValueError("can't interpret %r as a time" % (s,)) + + def _from_time(self, t): + self.nanosecond_time = (t.hour * Time.HOUR + + t.minute * Time.MINUTE + + t.second * Time.SECOND + + t.microsecond * Time.MICRO) + + def __eq__(self, other): + if isinstance(other, Time): + return self.nanosecond_time == other.nanosecond_time + + return self.nanosecond_time % Time.MICRO == 0 and \ + datetime.time(hour=self.hour, minute=self.minute, second=self.second, + microsecond=self.nanosecond // Time.MICRO) == other + + 
def __repr__(self): + return "Time(%s)" % self.nanosecond_time + + def __str__(self): + return "%02d:%02d:%02d.%09d" % (self.hour, self.minute, + self.second, self.nanosecond) From 74faba63e4913a8956068a00146d7333e9f709c1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 2 Feb 2015 15:35:59 -0600 Subject: [PATCH 1192/3726] Fix OrderedMap init from built-in dict. also make __getitem__ KeyError return friendly key instead of pickled value --- cassandra/util.py | 9 ++++++--- tests/unit/test_orderedmap.py | 5 +++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/cassandra/util.py b/cassandra/util.py index 09eba8254c..73beea8b2e 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -602,7 +602,7 @@ def __init__(self, *args, **kwargs): e = args[0] if callable(getattr(e, 'keys', None)): for k in e.keys(): - self._items.append((k, e[k])) + self._insert(k, e[k]) else: for k, v in e: self._insert(k, v) @@ -620,8 +620,11 @@ def _insert(self, key, value): self._index[flat_key] = len(self._items) - 1 def __getitem__(self, key): - index = self._index[self._serialize_key(key)] - return self._items[index][1] + try: + index = self._index[self._serialize_key(key)] + return self._items[index][1] + except KeyError: + raise KeyError(str(key)) def __iter__(self): for i in self._items: diff --git a/tests/unit/test_orderedmap.py b/tests/unit/test_orderedmap.py index 3cee3a11c5..48666fa9eb 100644 --- a/tests/unit/test_orderedmap.py +++ b/tests/unit/test_orderedmap.py @@ -32,6 +32,11 @@ def test_init(self): self.assertEqual(a, builtin) self.assertEqual(OrderedMap([(1, 1), (1, 2)]), {1: 2}) + d = OrderedMap({'': 3}, key1='v1', key2='v2') + self.assertEqual(d[''], 3) + self.assertEqual(d['key1'], 'v1') + self.assertEqual(d['key2'], 'v2') + def test_contains(self): keys = ['first', 'middle', 'last'] From 91e5afbe72e6491833af1df5bcce14e011c6f0b7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 2 Feb 2015 15:40:11 -0600 Subject: [PATCH 1193/3726] Make c.util.Time 
comparable to integer types. --- cassandra/util.py | 3 +++ tests/unit/test_marshalling.py | 4 ++-- tests/unit/test_types.py | 8 ++++---- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/cassandra/util.py b/cassandra/util.py index 73beea8b2e..14bc09cb79 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -740,6 +740,9 @@ def __eq__(self, other): if isinstance(other, Time): return self.nanosecond_time == other.nanosecond_time + if isinstance(other, six.integer_types): + return self.nanosecond_time == other + return self.nanosecond_time % Time.MICRO == 0 and \ datetime.time(hour=self.hour, minute=self.minute, second=self.second, microsecond=self.nanosecond // Time.MICRO) == other diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index 4adba74878..dcd4b772c2 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -24,7 +24,7 @@ from uuid import UUID from cassandra.cqltypes import lookup_casstype -from cassandra.util import OrderedMap, sortedset +from cassandra.util import OrderedMap, sortedset, Time marshalled_value_pairs = ( # binary form, type, python native type @@ -81,7 +81,7 @@ (b'\x00\x01\x00\x10\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0', 'ListType(TimeUUIDType)', [UUID(bytes=b'\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0')]), (b'\x80\x00\x00\x01', 'SimpleDateType', date(1970,1,2)), (b'\x7f\xff\xff\xff', 'SimpleDateType', date(1969,12,31)), - (b'\x00\x00\x00\x00\x00\x00\x00\x01', 'TimeType', 1) + (b'\x00\x00\x00\x00\x00\x00\x00\x01', 'TimeType', Time(1)) ) ordered_map_value = OrderedMap([(u'\u307fbob', 199), diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index e5fb7575be..b5b70b2f62 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -11,8 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import tempfile - try: import unittest2 as unittest except ImportError: @@ -21,18 +19,20 @@ from binascii import unhexlify import calendar import datetime +import tempfile import time + import cassandra from cassandra.cqltypes import (BooleanType, lookup_casstype_simple, lookup_casstype, LongType, DecimalType, SetType, cql_typename, CassandraType, UTF8Type, parse_casstype_args, SimpleDateType, TimeType, EmptyValue, _CassandraType, DateType, int64_pack) -from cassandra.query import named_tuple_factory +from cassandra.encoder import cql_quote from cassandra.protocol import (write_string, read_longstring, write_stringmap, read_stringmap, read_inet, write_inet, read_string, write_longstring) -from cassandra.encoder import cql_quote +from cassandra.query import named_tuple_factory class TypeTests(unittest.TestCase): From 586720a886c778785a159a670c63f5aadb646d53 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 2 Feb 2015 16:46:49 -0600 Subject: [PATCH 1194/3726] date, time conditional in 'type' integration test --- tests/integration/standard/test_types.py | 451 ++++++++++------------- 1 file changed, 204 insertions(+), 247 deletions(-) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 01da0dad39..95a70d45d6 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -47,20 +47,48 @@ def setup_module(): class TypeTests(unittest.TestCase): - def setUp(self): - self._cass_version, self._cql_version = get_server_versions() + _types_table_created = False + + @classmethod + def setup_class(cls): + cls._cass_version, cls._cql_version = get_server_versions() + + cls._col_types = ['text', + 'ascii', + 'bigint', + 'boolean', + 'decimal', + 'double', + 'float', + 'inet', + 'int', + 'list', + 'set', + 'map', + 'timestamp', + 'uuid', + 'timeuuid', + 'varchar', + 'varint'] + + if cls._cass_version >= (2, 1, 4): + cls._col_types.extend(('date', 'time')) + print cls._col_types + + 
cls._session = Cluster().connect() + cls._session.execute("CREATE KEYSPACE typetests WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}") + cls._session.set_keyspace("typetests") + + @classmethod + def teardown_class(cls): + cls._session.execute("DROP KEYSPACE typetests") + cls._session.shutdown() def test_blob_type_as_string(self): - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() + s = self._session s.execute(""" - CREATE KEYSPACE typetests_blob1 - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'} - """) - s.set_keyspace("typetests_blob1") - s.execute(""" - CREATE TABLE mytable ( + CREATE TABLE blobstring ( a ascii, b blob, PRIMARY KEY (a) @@ -72,7 +100,7 @@ def test_blob_type_as_string(self): b'blobyblob' ] - query = 'INSERT INTO mytable (a, b) VALUES (%s, %s)' + query = 'INSERT INTO blobstring (a, b) VALUES (%s, %s)' # In python 3, the 'bytes' type is treated as a blob, so we can # correctly encode it with hex notation. 
@@ -96,22 +124,15 @@ def test_blob_type_as_string(self): bytearray(b'blobyblob') ] - results = s.execute("SELECT * FROM mytable") + results = s.execute("SELECT * FROM blobstring") for expected, actual in zip(expected_vals, results[0]): self.assertEqual(expected, actual) def test_blob_type_as_bytearray(self): - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() - + s = self._session s.execute(""" - CREATE KEYSPACE typetests_blob2 - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'} - """) - s.set_keyspace("typetests_blob2") - s.execute(""" - CREATE TABLE mytable ( + CREATE TABLE blobbytes ( a ascii, b blob, PRIMARY KEY (a) @@ -123,7 +144,7 @@ def test_blob_type_as_bytearray(self): bytearray(b'blob1') ] - query = 'INSERT INTO mytable (a, b) VALUES (%s, %s);' + query = 'INSERT INTO blobbytes (a, b) VALUES (%s, %s);' s.execute(query, params) expected_vals = [ @@ -131,58 +152,33 @@ def test_blob_type_as_bytearray(self): bytearray(b'blob1') ] - results = s.execute("SELECT * FROM mytable") + results = s.execute("SELECT * FROM blobbytes") for expected, actual in zip(expected_vals, results[0]): self.assertEqual(expected, actual) - create_type_table = """ - CREATE TABLE mytable ( - a text, - b text, - c ascii, - d bigint, - f boolean, - g decimal, - h double, - i float, - j inet, - k int, - l list, - m set, - n map, - o text, - p timestamp, - q uuid, - r timeuuid, - s varchar, - t varint, - u date, - v time, - PRIMARY KEY (a, b) - ) - """ + def _create_all_types_table(self): + if not self._types_table_created: + TypeTests._col_names = ["%s_col" % col_type.translate(None, '<> ,') for col_type in self._col_types] + cql = "CREATE TABLE alltypes ( key int PRIMARY KEY, %s)" % ','.join("%s %s" % name_type for name_type in zip(self._col_names, self._col_types)) + self._session.execute(cql) + TypeTests._types_table_created = True def test_basic_types(self): - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() - s.execute(""" - 
CREATE KEYSPACE typetests - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'} - """) - s.set_keyspace("typetests") - s.execute(self.create_type_table) + + s = self._session.cluster.connect() + s.set_keyspace(self._session.keyspace) + + self._create_all_types_table() v1_uuid = uuid1() v4_uuid = uuid4() mydatetime = datetime(2013, 12, 31, 23, 59, 59, 999000) - mydate = date(2015, 1, 15) - mytime = time(16, 47, 25, 7) + # this could use some rework tying column types to names (instead of relying on position) params = [ - "sometext", - "sometext", - "ascii", # ascii + "text", + "ascii", 12345678923456789, # bigint True, # boolean Decimal('1.234567890123456789'), # decimal @@ -193,20 +189,16 @@ def test_basic_types(self): ['a', 'b', 'c'], # list collection set([1, 2, 3]), # set collection {'a': 1, 'b': 2}, # map collection - "text", # text mydatetime, # timestamp v4_uuid, # uuid v1_uuid, # timeuuid u"sometext\u1234", # varchar 123456789123456789123456789, # varint - mydate, # date - mytime ] - expected_vals = ( - "sometext", - "sometext", - "ascii", # ascii + expected_vals = [ + "text", + "ascii", 12345678923456789, # bigint True, # boolean Decimal('1.234567890123456789'), # decimal @@ -217,92 +209,93 @@ def test_basic_types(self): ['a', 'b', 'c'], # list collection sortedset((1, 2, 3)), # set collection {'a': 1, 'b': 2}, # map collection - "text", # text mydatetime, # timestamp v4_uuid, # uuid v1_uuid, # timeuuid u"sometext\u1234", # varchar 123456789123456789123456789, # varint - mydate, # date - 60445000007000 # time - ) + ] - s.execute(""" - INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v) - VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) - """, params) + if self._cass_version >= (2, 1, 4): + mydate = date(2015, 1, 15) + mytime = time(16, 47, 25, 7) + + params.append(mydate) + params.append(mytime) + + expected_vals.append(mydate) + expected_vals.append(mytime) - 
results = s.execute("SELECT * FROM mytable") + columns_string = ','.join(self._col_names) + placeholders = ', '.join(["%s"] * len(self._col_names)) + s.execute("INSERT INTO alltypes (key, %s) VALUES (0, %s)" % + (columns_string, placeholders), params) + + results = s.execute("SELECT %s FROM alltypes WHERE key=0" % columns_string) for expected, actual in zip(expected_vals, results[0]): - self.assertEqual(expected, actual) + self.assertEqual(actual, expected) # try the same thing with a prepared statement - prepared = s.prepare(""" - INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) - """) - + placeholders = ','.join(["?"] * len(self._col_names)) + prepared = s.prepare("INSERT INTO alltypes (key, %s) VALUES (1, %s)" % + (columns_string, placeholders)) s.execute(prepared.bind(params)) - results = s.execute("SELECT * FROM mytable") + results = s.execute("SELECT %s FROM alltypes WHERE key=1" % columns_string) for expected, actual in zip(expected_vals, results[0]): - self.assertEqual(expected, actual) + self.assertEqual(actual, expected) # query with prepared statement - prepared = s.prepare(""" - SELECT a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v FROM mytable - """) - results = s.execute(prepared.bind(())) + prepared = s.prepare("SELECT %s FROM alltypes WHERE key=?" 
% columns_string) + results = s.execute(prepared.bind((1,))) for expected, actual in zip(expected_vals, results[0]): - self.assertEqual(expected, actual) + self.assertEqual(actual, expected) # query with prepared statement, no explicit columns - prepared = s.prepare("""SELECT * FROM mytable""") + s.row_factory = dict_factory + prepared = s.prepare("SELECT * FROM alltypes") results = s.execute(prepared.bind(())) - for expected, actual in zip(expected_vals, results[0]): - self.assertEqual(expected, actual) + row = results[0] + for expected, name in zip(expected_vals, self._col_names): + self.assertEqual(row[name], expected) - def test_empty_strings_and_nones(self): - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() - s.execute(""" - CREATE KEYSPACE test_empty_strings_and_nones - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'} - """) - s.set_keyspace("test_empty_strings_and_nones") - s.execute(self.create_type_table) + s.shutdown() - s.execute("INSERT INTO mytable (a, b) VALUES ('a', 'b')") + def test_empty_strings_and_nones(self): + s = self._session.cluster.connect() + s.set_keyspace(self._session.keyspace) s.row_factory = dict_factory - results = s.execute(""" - SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v FROM mytable - """) + + self._create_all_types_table() + + columns_string = ','.join(self._col_names) + s.execute("INSERT INTO alltypes (key) VALUES (2)") + results = s.execute("SELECT %s FROM alltypes WHERE key=2" % columns_string) self.assertTrue(all(x is None for x in results[0].values())) - prepared = s.prepare(""" - SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v FROM mytable - """) - results = s.execute(prepared.bind(())) + prepared = s.prepare("SELECT %s FROM alltypes WHERE key=?" 
% columns_string) + results = s.execute(prepared.bind((2,))) self.assertTrue(all(x is None for x in results[0].values())) # insert empty strings for string-like fields and fetch them - s.execute("INSERT INTO mytable (a, b, c, o, s, l, n) VALUES ('a', 'b', %s, %s, %s, %s, %s)", - ('', '', '', [''], {'': 3})) - self.assertEqual( - {'c': '', 'o': '', 's': '', 'l': [''], 'n': OrderedMap({'': 3})}, - s.execute("SELECT c, o, s, l, n FROM mytable WHERE a='a' AND b='b'")[0]) - - self.assertEqual( - {'c': '', 'o': '', 's': '', 'l': [''], 'n': OrderedMap({'': 3})}, - s.execute(s.prepare("SELECT c, o, s, l, n FROM mytable WHERE a='a' AND b='b'"), [])[0]) + expected_values = {'text_col': '', 'ascii_col': '', 'varchar_col': '', 'listtext_col': [''], 'maptextint_col': OrderedMap({'': 3})} + columns_string = ','.join(expected_values.keys()) + placeholders = ','.join(["%s"] * len(expected_values)) + s.execute("INSERT INTO alltypes (key, %s) VALUES (3, %s)" % (columns_string, placeholders), expected_values.values()) + self.assertEqual(expected_values, + s.execute("SELECT %s FROM alltypes WHERE key=3" % columns_string)[0]) + self.assertEqual(expected_values, + s.execute(s.prepare("SELECT %s FROM alltypes WHERE key=?" 
% columns_string), (3,))[0]) # non-string types shouldn't accept empty strings - for col in ('d', 'f', 'g', 'h', 'i', 'k', 'l', 'm', 'n', 'q', 'r', 't'): - query = "INSERT INTO mytable (a, b, %s) VALUES ('a', 'b', %%s)" % (col, ) + for col in ('bigint_col', 'boolean_col', 'decimal_col', 'double_col', + 'float_col', 'int_col', 'listtext_col', 'setint_col', + 'maptextint_col', 'uuid_col', 'timeuuid_col', 'varint_col'): + query = "INSERT INTO alltypes (key, %s) VALUES (4, %%s)" % col try: s.execute(query, ['']) except InvalidRequest: @@ -311,7 +304,7 @@ def test_empty_strings_and_nones(self): self.fail("Expected an InvalidRequest error when inserting an " "emptry string for column %s" % (col, )) - prepared = s.prepare("INSERT INTO mytable (a, b, %s) VALUES ('a', 'b', ?)" % (col, )) + prepared = s.prepare("INSERT INTO alltypes (key, %s) VALUES (4, ?)" % col) try: s.execute(prepared, ['']) except TypeError: @@ -321,67 +314,53 @@ def test_empty_strings_and_nones(self): "emptry string for column %s with a prepared statement" % (col, )) # insert values for all columns - values = ['a', 'b', 'a', 1, True, Decimal('1.0'), 0.1, 0.1, - "1.2.3.4", 1, ['a'], set([1]), {'a': 1}, 'a', - datetime.now(), uuid4(), uuid1(), 'a', 1, '2014-01-01', '01:02:03.456789012'] - s.execute(""" - INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v) - VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) - """, values) + values = ['text', 'ascii', 1, True, Decimal('1.0'), 0.1, 0.1, + "1.2.3.4", 1, ['a'], set([1]), {'a': 1}, + datetime.now(), uuid4(), uuid1(), 'a', 1] + if self._cass_version >= (2, 1, 4): + values.append('2014-01-01') + values.append('01:02:03.456789012') + + columns_string = ','.join(self._col_names) + placeholders = ','.join(["%s"] * len(self._col_names)) + insert = "INSERT INTO alltypes (key, %s) VALUES (5, %s)" % (columns_string, placeholders) + s.execute(insert, values) # then insert None, which should null 
them out - null_values = values[:2] + ([None] * (len(values) - 2)) - s.execute(""" - INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v) - VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) - """, null_values) + null_values = [None] * len(self._col_names) + s.execute(insert, null_values) - results = s.execute(""" - SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v FROM mytable - """) + select = "SELECT %s FROM alltypes WHERE key=5" % columns_string + results = s.execute(select) self.assertEqual([], [(name, val) for (name, val) in results[0].items() if val is not None]) - prepared = s.prepare(""" - SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v FROM mytable - """) + prepared = s.prepare(select) results = s.execute(prepared.bind(())) self.assertEqual([], [(name, val) for (name, val) in results[0].items() if val is not None]) # do the same thing again, but use a prepared statement to insert the nulls - s.execute(""" - INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v) - VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) - """, values) - prepared = s.prepare(""" - INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
- """) + s.execute(insert, values) + + placeholders = ','.join(["?"] * len(self._col_names)) + prepared = s.prepare("INSERT INTO alltypes (key, %s) VALUES (5, %s)" % (columns_string, placeholders)) s.execute(prepared, null_values) - results = s.execute(""" - SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v FROM mytable - """) + results = s.execute(select) self.assertEqual([], [(name, val) for (name, val) in results[0].items() if val is not None]) - prepared = s.prepare(""" - SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v FROM mytable - """) + prepared = s.prepare(select) results = s.execute(prepared.bind(())) self.assertEqual([], [(name, val) for (name, val) in results[0].items() if val is not None]) + s.shutdown() + def test_empty_values(self): - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() - s.execute(""" - CREATE KEYSPACE test_empty_values - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'} - """) - s.set_keyspace("test_empty_values") - s.execute("CREATE TABLE mytable (a text PRIMARY KEY, b int)") - s.execute("INSERT INTO mytable (a, b) VALUES ('a', blobAsInt(0x))") + s = self._session + s.execute("CREATE TABLE empty_values (a text PRIMARY KEY, b int)") + s.execute("INSERT INTO empty_values (a, b) VALUES ('a', blobAsInt(0x))") try: Int32Type.support_empty_values = True - results = s.execute("SELECT b FROM mytable WHERE a='a'")[0] + results = s.execute("SELECT b FROM empty_values WHERE a='a'")[0] self.assertIs(EMPTY, results.b) finally: Int32Type.support_empty_values = False @@ -397,23 +376,19 @@ def test_timezone_aware_datetimes(self): eastern_tz = pytz.timezone('US/Eastern') eastern_tz.localize(dt) - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() + s = self._session - s.execute("""CREATE KEYSPACE tz_aware_test - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}""") - s.set_keyspace("tz_aware_test") - s.execute("CREATE TABLE mytable (a ascii 
PRIMARY KEY, b timestamp)") + s.execute("CREATE TABLE tz_aware (a ascii PRIMARY KEY, b timestamp)") # test non-prepared statement - s.execute("INSERT INTO mytable (a, b) VALUES ('key1', %s)", parameters=(dt,)) - result = s.execute("SELECT b FROM mytable WHERE a='key1'")[0].b + s.execute("INSERT INTO tz_aware (a, b) VALUES ('key1', %s)", parameters=(dt,)) + result = s.execute("SELECT b FROM tz_aware WHERE a='key1'")[0].b self.assertEqual(dt.utctimetuple(), result.utctimetuple()) # test prepared statement - prepared = s.prepare("INSERT INTO mytable (a, b) VALUES ('key2', ?)") + prepared = s.prepare("INSERT INTO tz_aware (a, b) VALUES ('key2', ?)") s.execute(prepared, parameters=(dt,)) - result = s.execute("SELECT b FROM mytable WHERE a='key2'")[0].b + result = s.execute("SELECT b FROM tz_aware WHERE a='key2'")[0].b self.assertEqual(dt.utctimetuple(), result.utctimetuple()) def test_tuple_type(self): @@ -424,38 +399,35 @@ def test_tuple_type(self): if self._cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() + s = self._session.cluster.connect() + s.set_keyspace(self._session.keyspace) # use this encoder in order to insert tuples s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple - s.execute("""CREATE KEYSPACE test_tuple_type - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}""") - s.set_keyspace("test_tuple_type") - s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen>)") + s.execute("CREATE TABLE tuple_type (a int PRIMARY KEY, b frozen>)") # test non-prepared statement complete = ('foo', 123, True) - s.execute("INSERT INTO mytable (a, b) VALUES (0, %s)", parameters=(complete,)) - result = s.execute("SELECT b FROM mytable WHERE a=0")[0] + s.execute("INSERT INTO tuple_type (a, b) VALUES (0, %s)", parameters=(complete,)) + result = s.execute("SELECT b FROM tuple_type WHERE a=0")[0] self.assertEqual(complete, result.b) 
partial = ('bar', 456) partial_result = partial + (None,) - s.execute("INSERT INTO mytable (a, b) VALUES (1, %s)", parameters=(partial,)) - result = s.execute("SELECT b FROM mytable WHERE a=1")[0] + s.execute("INSERT INTO tuple_type (a, b) VALUES (1, %s)", parameters=(partial,)) + result = s.execute("SELECT b FROM tuple_type WHERE a=1")[0] self.assertEqual(partial_result, result.b) # test single value tuples subpartial = ('zoo',) subpartial_result = subpartial + (None, None) - s.execute("INSERT INTO mytable (a, b) VALUES (2, %s)", parameters=(subpartial,)) - result = s.execute("SELECT b FROM mytable WHERE a=2")[0] + s.execute("INSERT INTO tuple_type (a, b) VALUES (2, %s)", parameters=(subpartial,)) + result = s.execute("SELECT b FROM tuple_type WHERE a=2")[0] self.assertEqual(subpartial_result, result.b) # test prepared statement - prepared = s.prepare("INSERT INTO mytable (a, b) VALUES (?, ?)") + prepared = s.prepare("INSERT INTO tuple_type (a, b) VALUES (?, ?)") s.execute(prepared, parameters=(3, complete)) s.execute(prepared, parameters=(4, partial)) s.execute(prepared, parameters=(5, subpartial)) @@ -463,11 +435,13 @@ def test_tuple_type(self): # extra items in the tuple should result in an error self.assertRaises(ValueError, s.execute, prepared, parameters=(0, (1, 2, 3, 4, 5, 6))) - prepared = s.prepare("SELECT b FROM mytable WHERE a=?") + prepared = s.prepare("SELECT b FROM tuple_type WHERE a=?") self.assertEqual(complete, s.execute(prepared, (3,))[0].b) self.assertEqual(partial_result, s.execute(prepared, (4,))[0].b) self.assertEqual(subpartial_result, s.execute(prepared, (5,))[0].b) + s.shutdown() + def test_tuple_type_varying_lengths(self): """ Test tuple types of lengths of 1, 2, 3, and 384 to ensure edge cases work @@ -477,39 +451,36 @@ def test_tuple_type_varying_lengths(self): if self._cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() + 
s = self._session.cluster.connect() + s.set_keyspace(self._session.keyspace) # set the row_factory to dict_factory for programmatic access # set the encoder for tuples for the ability to write tuples s.row_factory = dict_factory s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple - s.execute("""CREATE KEYSPACE test_tuple_type_varying_lengths - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}""") - s.set_keyspace("test_tuple_type_varying_lengths") - # programmatically create the table with tuples of said sizes lengths = (1, 2, 3, 384) value_schema = [] for i in lengths: value_schema += [' v_%s frozen>' % (i, ', '.join(['int'] * i))] - s.execute("CREATE TABLE mytable (k int PRIMARY KEY, %s)" % (', '.join(value_schema),)) + s.execute("CREATE TABLE tuple_lengths (k int PRIMARY KEY, %s)" % (', '.join(value_schema),)) # insert tuples into same key using different columns # and verify the results for i in lengths: # ensure tuples of larger sizes throw an error created_tuple = tuple(range(0, i + 1)) - self.assertRaises(InvalidRequest, s.execute, "INSERT INTO mytable (k, v_%s) VALUES (0, %s)", (i, created_tuple)) + self.assertRaises(InvalidRequest, s.execute, "INSERT INTO tuple_lengths (k, v_%s) VALUES (0, %s)", (i, created_tuple)) # ensure tuples of proper sizes are written and read correctly created_tuple = tuple(range(0, i)) - s.execute("INSERT INTO mytable (k, v_%s) VALUES (0, %s)", (i, created_tuple)) + s.execute("INSERT INTO tuple_lengths (k, v_%s) VALUES (0, %s)", (i, created_tuple)) - result = s.execute("SELECT v_%s FROM mytable WHERE k=0", (i,))[0] + result = s.execute("SELECT v_%s FROM tuple_lengths WHERE k=0", (i,))[0] self.assertEqual(tuple(created_tuple), result['v_%s' % i]) + s.shutdown() def test_tuple_primitive_subtypes(self): """ @@ -519,15 +490,11 @@ def test_tuple_primitive_subtypes(self): if self._cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - c = 
Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() + s = self._session.cluster.connect() + s.set_keyspace(self._session.keyspace) s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple - s.execute("""CREATE KEYSPACE test_tuple_primitive_subtypes - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}""") - s.set_keyspace("test_tuple_primitive_subtypes") - - s.execute("CREATE TABLE mytable (" + s.execute("CREATE TABLE tuple_primitive (" "k int PRIMARY KEY, " "v frozen>)" % ','.join(DATA_TYPE_PRIMITIVES)) @@ -538,10 +505,11 @@ def test_tuple_primitive_subtypes(self): response_tuple = tuple(created_tuple + [None for j in range(len(DATA_TYPE_PRIMITIVES) - i - 1)]) written_tuple = tuple(created_tuple) - s.execute("INSERT INTO mytable (k, v) VALUES (%s, %s)", (i, written_tuple)) + s.execute("INSERT INTO tuple_primitive (k, v) VALUES (%s, %s)", (i, written_tuple)) - result = s.execute("SELECT v FROM mytable WHERE k=%s", (i,))[0] + result = s.execute("SELECT v FROM tuple_primitive WHERE k=%s", (i,))[0] self.assertEqual(response_tuple, result.v) + s.shutdown() def test_tuple_non_primitive_subtypes(self): """ @@ -551,18 +519,14 @@ def test_tuple_non_primitive_subtypes(self): if self._cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() + s = self._session.cluster.connect() + s.set_keyspace(self._session.keyspace) # set the row_factory to dict_factory for programmatic access # set the encoder for tuples for the ability to write tuples s.row_factory = dict_factory s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple - s.execute("""CREATE KEYSPACE test_tuple_non_primitive_subtypes - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}""") - s.set_keyspace("test_tuple_non_primitive_subtypes") - values = [] # create list values @@ -588,7 +552,7 @@ def test_tuple_non_primitive_subtypes(self): )) # create table - 
s.execute("CREATE TABLE mytable (" + s.execute("CREATE TABLE tuple_non_primative (" "k int PRIMARY KEY, " "%s)" % ', '.join(values)) @@ -596,18 +560,18 @@ def test_tuple_non_primitive_subtypes(self): # test tuple> for datatype in DATA_TYPE_PRIMITIVES: created_tuple = tuple([[get_sample(datatype)]]) - s.execute("INSERT INTO mytable (k, v_%s) VALUES (0, %s)", (i, created_tuple)) + s.execute("INSERT INTO tuple_non_primative (k, v_%s) VALUES (0, %s)", (i, created_tuple)) - result = s.execute("SELECT v_%s FROM mytable WHERE k=0", (i,))[0] + result = s.execute("SELECT v_%s FROM tuple_non_primative WHERE k=0", (i,))[0] self.assertEqual(created_tuple, result['v_%s' % i]) i += 1 # test tuple> for datatype in DATA_TYPE_PRIMITIVES: created_tuple = tuple([sortedset([get_sample(datatype)])]) - s.execute("INSERT INTO mytable (k, v_%s) VALUES (0, %s)", (i, created_tuple)) + s.execute("INSERT INTO tuple_non_primative (k, v_%s) VALUES (0, %s)", (i, created_tuple)) - result = s.execute("SELECT v_%s FROM mytable WHERE k=0", (i,))[0] + result = s.execute("SELECT v_%s FROM tuple_non_primative WHERE k=0", (i,))[0] self.assertEqual(created_tuple, result['v_%s' % i]) i += 1 @@ -619,11 +583,12 @@ def test_tuple_non_primitive_subtypes(self): else: created_tuple = tuple([{get_sample(datatype): get_sample(datatype)}]) - s.execute("INSERT INTO mytable (k, v_%s) VALUES (0, %s)", (i, created_tuple)) + s.execute("INSERT INTO tuple_non_primative (k, v_%s) VALUES (0, %s)", (i, created_tuple)) - result = s.execute("SELECT v_%s FROM mytable WHERE k=0", (i,))[0] + result = s.execute("SELECT v_%s FROM tuple_non_primative WHERE k=0", (i,))[0] self.assertEqual(created_tuple, result['v_%s' % i]) i += 1 + s.shutdown() def nested_tuples_schema_helper(self, depth): """ @@ -653,20 +618,16 @@ def test_nested_tuples(self): if self._cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() + s = 
self._session.cluster.connect() + s.set_keyspace(self._session.keyspace) # set the row_factory to dict_factory for programmatic access # set the encoder for tuples for the ability to write tuples s.row_factory = dict_factory s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple - s.execute("""CREATE KEYSPACE test_nested_tuples - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}""") - s.set_keyspace("test_nested_tuples") - # create a table with multiple sizes of nested tuples - s.execute("CREATE TABLE mytable (" + s.execute("CREATE TABLE nested_tuples (" "k int PRIMARY KEY, " "v_1 frozen<%s>," "v_2 frozen<%s>," @@ -682,11 +643,12 @@ def test_nested_tuples(self): created_tuple = self.nested_tuples_creator_helper(i) # write tuple - s.execute("INSERT INTO mytable (k, v_%s) VALUES (%s, %s)", (i, i, created_tuple)) + s.execute("INSERT INTO nested_tuples (k, v_%s) VALUES (%s, %s)", (i, i, created_tuple)) # verify tuple was written and read correctly - result = s.execute("SELECT v_%s FROM mytable WHERE k=%s", (i, i))[0] + result = s.execute("SELECT v_%s FROM nested_tuples WHERE k=%s", (i, i))[0] self.assertEqual(created_tuple, result['v_%s' % i]) + s.shutdown() def test_tuples_with_nulls(self): """ @@ -695,35 +657,27 @@ def test_tuples_with_nulls(self): if self._cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() + s = self._session - s.execute("""CREATE KEYSPACE test_tuples_with_nulls - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}""") - s.set_keyspace("test_tuples_with_nulls") + s.execute("CREATE TABLE tuples_nulls (k int PRIMARY KEY, t frozen>)") - s.execute("CREATE TABLE mytable (k int PRIMARY KEY, t frozen>)") - - insert = s.prepare("INSERT INTO mytable (k, t) VALUES (0, ?)") + insert = s.prepare("INSERT INTO tuples_nulls (k, t) VALUES (0, ?)") s.execute(insert, [(None, None, None, None)]) - result = 
s.execute("SELECT * FROM mytable WHERE k=0") + result = s.execute("SELECT * FROM tuples_nulls WHERE k=0") self.assertEquals((None, None, None, None), result[0].t) - read = s.prepare("SELECT * FROM mytable WHERE k=0") + read = s.prepare("SELECT * FROM tuples_nulls WHERE k=0") self.assertEquals((None, None, None, None), s.execute(read)[0].t) # also test empty strings where compatible s.execute(insert, [('', None, None, b'')]) - result = s.execute("SELECT * FROM mytable WHERE k=0") + result = s.execute("SELECT * FROM tuples_nulls WHERE k=0") self.assertEquals(('', None, None, b''), result[0].t) self.assertEquals(('', None, None, b''), s.execute(read)[0].t) - c.shutdown() - def test_unicode_query_string(self): - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() + s = self._session query = u"SELECT * FROM system.schema_columnfamilies WHERE keyspace_name = 'ef\u2052ef' AND columnfamily_name = %s" s.execute(query, (u"fe\u2051fe",)) @@ -739,10 +693,13 @@ def test_nested_collections(self): if self._cass_version < (2, 1, 3): raise unittest.SkipTest("Support for nested collections was introduced in Cassandra 2.1.3") + if PROTOCOL_VERSION < 3: + raise unittest.SkipTest("Protocol version > 3 required for nested collections") + name = self._testMethodName - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect('test1rf') + s = self._session.cluster.connect() + s.set_keyspace(self._session.keyspace) s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple s.execute(""" From 52cccb79d3f4c5fef0003aa99b821980698ec1b5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 3 Feb 2015 11:22:04 -0600 Subject: [PATCH 1195/3726] Tests for disabling control connection meta refresh PYTHON-202 --- tests/unit/test_control_connection.py | 57 ++++++++++++++++++++++++--- 1 file changed, 52 insertions(+), 5 deletions(-) diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index de95dbf475..a0cb65235d 100644 --- 
a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -15,15 +15,14 @@ try: import unittest2 as unittest except ImportError: - import unittest # noqa - -from mock import Mock, ANY + import unittest # noqa from concurrent.futures import ThreadPoolExecutor +from mock import Mock, ANY, call from cassandra import OperationTimedOut from cassandra.protocol import ResultMessage, RESULT_KIND_ROWS -from cassandra.cluster import ControlConnection, Cluster, _Scheduler +from cassandra.cluster import ControlConnection, _Scheduler from cassandra.pool import Host from cassandra.policies import (SimpleConvictionPolicy, RoundRobinPolicy, ConstantReconnectionPolicy) @@ -337,7 +336,7 @@ def bad_wait_for_responses(*args, **kwargs): self.connection.wait_for_responses = Mock(side_effect=bad_wait_for_responses) self.control_connection.refresh_schema() - self.assertEqual(self.connection.wait_for_responses.call_count, self.cluster.max_schema_agreement_wait/self.control_connection._timeout) + self.assertEqual(self.connection.wait_for_responses.call_count, self.cluster.max_schema_agreement_wait / self.control_connection._timeout) self.assertEqual(self.connection.wait_for_responses.call_args[1]['timeout'], self.control_connection._timeout) def test_handle_topology_change(self): @@ -417,3 +416,51 @@ def test_handle_schema_change(self): event['table'] = None self.control_connection._handle_schema_change(event) self.cluster.scheduler.schedule_unique.assert_called_once_with(0.0, self.control_connection.refresh_schema, 'ks1', None, None) + + def test_refresh_disabled(self): + cluster = MockCluster() + + schema_event = { + 'change_type': 'CREATED', + 'keyspace': 'ks1', + 'table': 'table1' + } + + status_event = { + 'change_type': 'UP', + 'address': ('1.2.3.4', 9000) + } + + topo_event = { + 'change_type': 'MOVED_NODE', + 'address': ('1.2.3.4', 9000) + } + + cc_no_schema_refresh = ControlConnection(cluster, 1, -1, 0) + cluster.scheduler.reset_mock() + + # no call on 
schema refresh + cc_no_schema_refresh._handle_schema_change(schema_event) + self.assertFalse(cluster.scheduler.schedule.called) + self.assertFalse(cluster.scheduler.schedule_unique.called) + + # topo and status changes as normal + cc_no_schema_refresh._handle_status_change(status_event) + cc_no_schema_refresh._handle_topology_change(topo_event) + cluster.scheduler.schedule_unique.assert_has_calls([call(ANY, cc_no_schema_refresh.refresh_node_list_and_token_map), + call(ANY, cc_no_schema_refresh.refresh_node_list_and_token_map)]) + + cc_no_topo_refresh = ControlConnection(cluster, 1, 0, -1) + cluster.scheduler.reset_mock() + + # no call on topo refresh + cc_no_topo_refresh._handle_topology_change(topo_event) + self.assertFalse(cluster.scheduler.schedule.called) + self.assertFalse(cluster.scheduler.schedule_unique.called) + + # schema and status change refresh as normal + cc_no_topo_refresh._handle_status_change(status_event) + cc_no_topo_refresh._handle_schema_change(schema_event) + cluster.scheduler.schedule_unique.assert_has_calls([call(ANY, cc_no_topo_refresh.refresh_node_list_and_token_map), + call(0.0, cc_no_topo_refresh.refresh_schema, + schema_event['keyspace'], schema_event['table'], None)]) From f35d3735501bdb438579f6990df80aa9912d8fd5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 3 Feb 2015 12:00:35 -0600 Subject: [PATCH 1196/3726] ignore Mac .DS_Store files --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 7595803e08..4774a926f5 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,6 @@ docs/_build/ tests/integration/ccm setuptools*.tar.gz setuptools*.egg + +# OSX +.DS_Store From df2009d18d5d45732089f333fa67608a8b4569d7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 3 Feb 2015 12:28:13 -0600 Subject: [PATCH 1197/3726] assertEquals --> assertEqual --- tests/integration/standard/test_query_paging.py | 10 +++++----- tests/integration/standard/test_types.py | 13 ++++++------- 2 files changed, 
11 insertions(+), 12 deletions(-) diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index e634f36a42..cc3dea6bc1 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -251,7 +251,7 @@ def handle_error(err): future.add_callbacks(callback=handle_page, callback_args=(future, counter), errback=handle_error) event.wait() - self.assertEquals(next(counter), 100) + self.assertEqual(next(counter), 100) # simple statement future = self.session.execute_async(SimpleStatement("SELECT * FROM test3rf.test")) @@ -260,7 +260,7 @@ def handle_error(err): future.add_callbacks(callback=handle_page, callback_args=(future, counter), errback=handle_error) event.wait() - self.assertEquals(next(counter), 100) + self.assertEqual(next(counter), 100) # prepared statement future = self.session.execute_async(prepared) @@ -269,7 +269,7 @@ def handle_error(err): future.add_callbacks(callback=handle_page, callback_args=(future, counter), errback=handle_error) event.wait() - self.assertEquals(next(counter), 100) + self.assertEqual(next(counter), 100) def test_concurrent_with_paging(self): statements_and_params = zip(cycle(["INSERT INTO test3rf.test (k, v) VALUES (%s, 0)"]), @@ -281,10 +281,10 @@ def test_concurrent_with_paging(self): for fetch_size in (2, 3, 7, 10, 99, 100, 101, 10000): self.session.default_fetch_size = fetch_size results = execute_concurrent_with_args(self.session, prepared, [None] * 10) - self.assertEquals(10, len(results)) + self.assertEqual(10, len(results)) for (success, result) in results: self.assertTrue(success) - self.assertEquals(100, len(list(result))) + self.assertEqual(100, len(list(result))) def test_fetch_size(self): """ diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index f86cf6e7b9..50d556b21f 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py 
@@ -700,16 +700,16 @@ def test_tuples_with_nulls(self): s.execute(insert, [(None, None, None, None)]) result = s.execute("SELECT * FROM mytable WHERE k=0") - self.assertEquals((None, None, None, None), result[0].t) + self.assertEqual((None, None, None, None), result[0].t) read = s.prepare("SELECT * FROM mytable WHERE k=0") - self.assertEquals((None, None, None, None), s.execute(read)[0].t) + self.assertEqual((None, None, None, None), s.execute(read)[0].t) # also test empty strings where compatible s.execute(insert, [('', None, None, b'')]) result = s.execute("SELECT * FROM mytable WHERE k=0") - self.assertEquals(('', None, None, b''), result[0].t) - self.assertEquals(('', None, None, b''), s.execute(read)[0].t) + self.assertEqual(('', None, None, b''), result[0].t) + self.assertEqual(('', None, None, b''), s.execute(read)[0].t) c.shutdown() @@ -760,8 +760,7 @@ def test_nested_collections(self): map_list map>, frozen>>, map_tuple map>, frozen>>, map_udt map, frozen<%s>>, - )""" - % (name, name, name)) + )""" % (name, name, name)) validate = partial(self.insert_select_column, s, name) validate('map_map', OrderedMap([({1: 1, 2: 2}, {3: 3, 4: 4}), ({5: 5, 6: 6}, {7: 7, 8: 8})])) @@ -769,7 +768,7 @@ def test_nested_collections(self): validate('map_list', OrderedMap([([1, 2], [3, 4]), ([5, 6], [7, 8])])) validate('map_tuple', OrderedMap([((1, 2), (3,)), ((4, 5), (6,))])) - value = nested_collection_udt({1: 'v1', 2: 'v2'}, (3, 'v3'), [4, 5, 6, 7], set((8,9,10))) + value = nested_collection_udt({1: 'v1', 2: 'v2'}, (3, 'v3'), [4, 5, 6, 7], set((8, 9, 10))) key = nested_collection_udt_nested(value.m, value.t, value.l, value.s, value) key2 = nested_collection_udt_nested({3: 'v3'}, value.t, value.l, value.s, value) validate('map_udt', OrderedMap([(key, value), (key2, value)])) From 0ded875003526a0757178dcd66e992b654306f73 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 3 Feb 2015 16:00:00 -0600 Subject: [PATCH 1198/3726] Move cqlengine to cassandra package --- 
cassandra/cqlengine/__init__.py | 9 +++++ {cqlengine => cassandra/cqlengine}/columns.py | 8 ++--- .../cqlengine}/connection.py | 18 ++++------ .../cqlengine}/exceptions.py | 0 .../cqlengine}/functions.py | 6 ++-- .../cqlengine}/management.py | 17 +++++----- {cqlengine => cassandra/cqlengine}/models.py | 13 ++++---- {cqlengine => cassandra/cqlengine}/named.py | 9 +++-- .../cqlengine}/operators.py | 3 +- {cqlengine => cassandra/cqlengine}/query.py | 28 ++++++++-------- .../cqlengine}/statements.py | 11 +++---- .../cqlengine}/tests/__init__.py | 0 .../cqlengine}/tests/base.py | 0 .../cqlengine}/tests/columns/__init__.py | 0 .../tests/columns/test_container_columns.py | 0 .../tests/columns/test_counter_column.py | 0 .../tests/columns/test_static_column.py | 0 .../tests/columns/test_validation.py | 0 .../cqlengine}/tests/columns/test_value_io.py | 0 .../cqlengine}/tests/connections/__init__.py | 0 .../cqlengine}/tests/management/__init__.py | 0 .../management/test_compaction_settings.py | 0 .../tests/management/test_management.py | 0 .../cqlengine}/tests/model/__init__.py | 0 .../tests/model/test_class_construction.py | 0 .../tests/model/test_equality_operations.py | 0 .../cqlengine}/tests/model/test_model.py | 0 .../cqlengine}/tests/model/test_model_io.py | 0 .../tests/model/test_polymorphism.py | 0 .../cqlengine}/tests/model/test_updates.py | 0 .../cqlengine}/tests/model/test_validation.py | 0 .../tests/model/test_value_lists.py | 0 .../cqlengine}/tests/operators/__init__.py | 0 .../operators/test_assignment_operators.py | 0 .../tests/operators/test_base_operator.py | 0 .../tests/operators/test_where_operators.py | 0 .../cqlengine}/tests/query/__init__.py | 0 .../tests/query/test_batch_query.py | 0 .../tests/query/test_datetime_queries.py | 0 .../cqlengine}/tests/query/test_named.py | 0 .../tests/query/test_queryoperators.py | 0 .../cqlengine}/tests/query/test_queryset.py | 0 .../cqlengine}/tests/query/test_updates.py | 0 .../cqlengine}/tests/statements/__init__.py | 
0 .../statements/test_assignment_clauses.py | 0 .../statements/test_assignment_statement.py | 0 .../tests/statements/test_base_clause.py | 0 .../tests/statements/test_base_statement.py | 0 .../tests/statements/test_delete_statement.py | 0 .../tests/statements/test_insert_statement.py | 0 .../tests/statements/test_quoter.py | 0 .../tests/statements/test_select_statement.py | 0 .../tests/statements/test_update_statement.py | 0 .../tests/statements/test_where_clause.py | 0 .../cqlengine}/tests/test_batch_query.py | 0 .../cqlengine}/tests/test_consistency.py | 0 .../cqlengine}/tests/test_ifnotexists.py | 0 .../cqlengine}/tests/test_load.py | 0 .../cqlengine}/tests/test_timestamp.py | 0 .../cqlengine}/tests/test_transaction.py | 0 .../cqlengine}/tests/test_ttl.py | 0 cqlengine/VERSION | 1 - cqlengine/__init__.py | 33 ------------------- 63 files changed, 61 insertions(+), 95 deletions(-) create mode 100644 cassandra/cqlengine/__init__.py rename {cqlengine => cassandra/cqlengine}/columns.py (99%) rename {cqlengine => cassandra/cqlengine}/connection.py (90%) rename {cqlengine => cassandra/cqlengine}/exceptions.py (100%) rename {cqlengine => cassandra/cqlengine}/functions.py (98%) rename {cqlengine => cassandra/cqlengine}/management.py (97%) rename {cqlengine => cassandra/cqlengine}/models.py (98%) rename {cqlengine => cassandra/cqlengine}/named.py (89%) rename {cqlengine => cassandra/cqlengine}/operators.py (99%) rename {cqlengine => cassandra/cqlengine}/query.py (97%) rename {cqlengine => cassandra/cqlengine}/statements.py (99%) rename {cqlengine => cassandra/cqlengine}/tests/__init__.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/base.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/columns/__init__.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/columns/test_container_columns.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/columns/test_counter_column.py (100%) rename {cqlengine => 
cassandra/cqlengine}/tests/columns/test_static_column.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/columns/test_validation.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/columns/test_value_io.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/connections/__init__.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/management/__init__.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/management/test_compaction_settings.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/management/test_management.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/model/__init__.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/model/test_class_construction.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/model/test_equality_operations.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/model/test_model.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/model/test_model_io.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/model/test_polymorphism.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/model/test_updates.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/model/test_validation.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/model/test_value_lists.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/operators/__init__.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/operators/test_assignment_operators.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/operators/test_base_operator.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/operators/test_where_operators.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/query/__init__.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/query/test_batch_query.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/query/test_datetime_queries.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/query/test_named.py (100%) rename {cqlengine => 
cassandra/cqlengine}/tests/query/test_queryoperators.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/query/test_queryset.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/query/test_updates.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/statements/__init__.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/statements/test_assignment_clauses.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/statements/test_assignment_statement.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/statements/test_base_clause.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/statements/test_base_statement.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/statements/test_delete_statement.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/statements/test_insert_statement.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/statements/test_quoter.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/statements/test_select_statement.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/statements/test_update_statement.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/statements/test_where_clause.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/test_batch_query.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/test_consistency.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/test_ifnotexists.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/test_load.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/test_timestamp.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/test_transaction.py (100%) rename {cqlengine => cassandra/cqlengine}/tests/test_ttl.py (100%) delete mode 100644 cqlengine/VERSION delete mode 100644 cqlengine/__init__.py diff --git a/cassandra/cqlengine/__init__.py b/cassandra/cqlengine/__init__.py new file mode 100644 index 0000000000..1c61c4ee8e --- /dev/null +++ b/cassandra/cqlengine/__init__.py @@ -0,0 +1,9 @@ +# compaction 
+SizeTieredCompactionStrategy = "SizeTieredCompactionStrategy" +LeveledCompactionStrategy = "LeveledCompactionStrategy" + +# Caching constants. +CACHING_ALL = "ALL" +CACHING_KEYS_ONLY = "KEYS_ONLY" +CACHING_ROWS_ONLY = "ROWS_ONLY" +CACHING_NONE = "NONE" diff --git a/cqlengine/columns.py b/cassandra/cqlengine/columns.py similarity index 99% rename from cqlengine/columns.py rename to cassandra/cqlengine/columns.py index bb2d77bb87..cc43a11712 100644 --- a/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -3,13 +3,13 @@ from datetime import datetime from datetime import date import re -from cassandra.cqltypes import DateType +import six +import sys -from cqlengine.exceptions import ValidationError +from cassandra.cqltypes import DateType from cassandra.encoder import cql_quote -import six +from cassandra.cqlengine.exceptions import ValidationError -import sys # move to central spot class UnicodeMixin(object): diff --git a/cqlengine/connection.py b/cassandra/cqlengine/connection.py similarity index 90% rename from cqlengine/connection.py rename to cassandra/cqlengine/connection.py index 34f8da3acb..d2af8caeab 100644 --- a/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -1,17 +1,13 @@ -#http://pypi.python.org/pypi/cql/1.0.4 -#http://code.google.com/a/apache-extras.org/p/cassandra-dbapi2 / -#http://cassandra.apache.org/doc/cql/CQL.html -from __future__ import absolute_import from collections import namedtuple -from cassandra.cluster import Cluster, _NOT_SET, NoHostAvailable -from cassandra.query import SimpleStatement, Statement, dict_factory -from cqlengine.statements import BaseCQLStatement -from cqlengine.exceptions import CQLEngineException, UndefinedKeyspaceException -from cassandra import ConsistencyLevel - import six import logging +from cassandra import ConsistencyLevel +from cassandra.cluster import Cluster, _NOT_SET, NoHostAvailable +from cassandra.query import SimpleStatement, Statement, dict_factory +from 
cassandra.cqlengine.exceptions import CQLEngineException, UndefinedKeyspaceException +from cassandra.cqlengine.statements import BaseCQLStatement + LOG = logging.getLogger('cqlengine.cql') NOT_SET = _NOT_SET # required for passing timeout to Session.execute @@ -55,7 +51,7 @@ def setup( if not default_keyspace: raise UndefinedKeyspaceException() - from cqlengine import models + from cassandra.cqlengine import models models.DEFAULT_KEYSPACE = default_keyspace default_consistency_level = consistency diff --git a/cqlengine/exceptions.py b/cassandra/cqlengine/exceptions.py similarity index 100% rename from cqlengine/exceptions.py rename to cassandra/cqlengine/exceptions.py diff --git a/cqlengine/functions.py b/cassandra/cqlengine/functions.py similarity index 98% rename from cqlengine/functions.py rename to cassandra/cqlengine/functions.py index d146290722..78d42479b6 100644 --- a/cqlengine/functions.py +++ b/cassandra/cqlengine/functions.py @@ -1,8 +1,8 @@ from datetime import datetime -from uuid import uuid1 -import sys import six -from cqlengine.exceptions import ValidationError +import sys + +from cassandra.cqlengine.exceptions import ValidationError # move to central spot class UnicodeMixin(object): diff --git a/cqlengine/management.py b/cassandra/cqlengine/management.py similarity index 97% rename from cqlengine/management.py rename to cassandra/cqlengine/management.py index 388efbcd68..58f4a871bb 100644 --- a/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -1,19 +1,18 @@ +from collections import namedtuple import json -import warnings +import logging import six -from cqlengine import SizeTieredCompactionStrategy, LeveledCompactionStrategy -from cqlengine import ONE -from cqlengine.named import NamedTable -from cqlengine.connection import execute, get_cluster -from cqlengine.exceptions import CQLEngineException +from cassandra.cqlengine import SizeTieredCompactionStrategy, LeveledCompactionStrategy +from cassandra.cqlengine.connection import 
execute, get_cluster +from cassandra.cqlengine.exceptions import CQLEngineException +from cassandra.cqlengine.models import Model +from cassandra.cqlengine.named import NamedTable + -import logging -from collections import namedtuple Field = namedtuple('Field', ['name', 'type']) logger = logging.getLogger(__name__) -from cqlengine.models import Model # system keyspaces schema_columnfamilies = NamedTable('system', 'schema_columnfamilies') diff --git a/cqlengine/models.py b/cassandra/cqlengine/models.py similarity index 98% rename from cqlengine/models.py rename to cassandra/cqlengine/models.py index d541f1a962..019d6beb94 100644 --- a/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -1,12 +1,11 @@ -from collections import OrderedDict import re -import warnings -from cqlengine import columns -from cqlengine.exceptions import ModelException, CQLEngineException, ValidationError -from cqlengine.query import ModelQuerySet, DMLQuery, AbstractQueryableColumn, NOT_SET -from cqlengine.query import DoesNotExist as _DoesNotExist -from cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned +from cassandra.cqlengine import columns +from cassandra.cqlengine.exceptions import ModelException, CQLEngineException, ValidationError +from cassandra.cqlengine.query import ModelQuerySet, DMLQuery, AbstractQueryableColumn, NOT_SET +from cassandra.cqlengine.query import DoesNotExist as _DoesNotExist +from cassandra.cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned +from cassandra.util import OrderedDict class ModelDefinitionException(ModelException): pass diff --git a/cqlengine/named.py b/cassandra/cqlengine/named.py similarity index 89% rename from cqlengine/named.py rename to cassandra/cqlengine/named.py index c6ba3ac995..0f0b86b262 100644 --- a/cqlengine/named.py +++ b/cassandra/cqlengine/named.py @@ -1,8 +1,7 @@ -from cqlengine.exceptions import CQLEngineException -from cqlengine.query import AbstractQueryableColumn, SimpleQuerySet 
- -from cqlengine.query import DoesNotExist as _DoesNotExist -from cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned +from cassandra.cqlengine.exceptions import CQLEngineException +from cassandra.cqlengine.query import AbstractQueryableColumn, SimpleQuerySet +from cassandra.cqlengine.query import DoesNotExist as _DoesNotExist +from cassandra.cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned class QuerySetDescriptor(object): """ diff --git a/cqlengine/operators.py b/cassandra/cqlengine/operators.py similarity index 99% rename from cqlengine/operators.py rename to cassandra/cqlengine/operators.py index 4776356623..e609531fa1 100644 --- a/cqlengine/operators.py +++ b/cassandra/cqlengine/operators.py @@ -1,10 +1,9 @@ import six +import sys class QueryOperatorException(Exception): pass -import sys - # move to central spot class UnicodeMixin(object): if sys.version_info > (3, 0): diff --git a/cqlengine/query.py b/cassandra/cqlengine/query.py similarity index 97% rename from cqlengine/query.py rename to cassandra/cqlengine/query.py index adb559ea11..ca647f016f 100644 --- a/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1,20 +1,20 @@ -from __future__ import absolute_import import copy -import time from datetime import datetime, timedelta -from cqlengine import BaseContainerColumn, Map, columns -from cqlengine.columns import Counter, List, Set - -from .connection import execute, NOT_SET - -from cqlengine.exceptions import CQLEngineException, ValidationError, LWTException, IfNotExistsWithCounterColumn -from cqlengine.functions import Token, BaseQueryFunction, QueryValue, UnicodeMixin +import time -#CQL 3 reference: -#http://www.datastax.com/docs/1.1/references/cql/index -from cqlengine.operators import InOperator, EqualsOperator, GreaterThanOperator, GreaterThanOrEqualOperator -from cqlengine.operators import LessThanOperator, LessThanOrEqualOperator, BaseWhereOperator -from cqlengine.statements import WhereClause, 
SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause, InsertStatement, BaseCQLStatement, MapUpdateClause, MapDeleteClause, ListUpdateClause, SetUpdateClause, CounterUpdateClause, TransactionClause +from cassandra.cqlengine.columns import BaseContainerColumn, Map, Counter, List, Set +from cassandra.cqlengine.connection import execute, NOT_SET +from cassandra.cqlengine.exceptions import CQLEngineException, ValidationError, LWTException, IfNotExistsWithCounterColumn +from cassandra.cqlengine.functions import Token, BaseQueryFunction, QueryValue, UnicodeMixin +from cassandra.cqlengine.operators import (InOperator, EqualsOperator, GreaterThanOperator, + GreaterThanOrEqualOperator, LessThanOperator, + LessThanOrEqualOperator, BaseWhereOperator) +# import * ? +from cassandra.cqlengine.statements import (WhereClause, SelectStatement, DeleteStatement, + UpdateStatement, AssignmentClause, InsertStatement, + BaseCQLStatement, MapUpdateClause, MapDeleteClause, + ListUpdateClause, SetUpdateClause, CounterUpdateClause, + TransactionClause) class QueryException(CQLEngineException): pass diff --git a/cqlengine/statements.py b/cassandra/cqlengine/statements.py similarity index 99% rename from cqlengine/statements.py rename to cassandra/cqlengine/statements.py index b044c86c7a..917b986239 100644 --- a/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -1,15 +1,14 @@ -import time from datetime import datetime, timedelta +import time import six -from cqlengine.functions import QueryValue -from cqlengine.operators import BaseWhereOperator, InOperator +import sys - -class StatementException(Exception): pass +from cassandra.cqlengine.functions import QueryValue +from cassandra.cqlengine.operators import BaseWhereOperator, InOperator +class StatementException(Exception): pass -import sys class UnicodeMixin(object): if sys.version_info > (3, 0): diff --git a/cqlengine/tests/__init__.py b/cassandra/cqlengine/tests/__init__.py similarity index 100% rename from 
cqlengine/tests/__init__.py rename to cassandra/cqlengine/tests/__init__.py diff --git a/cqlengine/tests/base.py b/cassandra/cqlengine/tests/base.py similarity index 100% rename from cqlengine/tests/base.py rename to cassandra/cqlengine/tests/base.py diff --git a/cqlengine/tests/columns/__init__.py b/cassandra/cqlengine/tests/columns/__init__.py similarity index 100% rename from cqlengine/tests/columns/__init__.py rename to cassandra/cqlengine/tests/columns/__init__.py diff --git a/cqlengine/tests/columns/test_container_columns.py b/cassandra/cqlengine/tests/columns/test_container_columns.py similarity index 100% rename from cqlengine/tests/columns/test_container_columns.py rename to cassandra/cqlengine/tests/columns/test_container_columns.py diff --git a/cqlengine/tests/columns/test_counter_column.py b/cassandra/cqlengine/tests/columns/test_counter_column.py similarity index 100% rename from cqlengine/tests/columns/test_counter_column.py rename to cassandra/cqlengine/tests/columns/test_counter_column.py diff --git a/cqlengine/tests/columns/test_static_column.py b/cassandra/cqlengine/tests/columns/test_static_column.py similarity index 100% rename from cqlengine/tests/columns/test_static_column.py rename to cassandra/cqlengine/tests/columns/test_static_column.py diff --git a/cqlengine/tests/columns/test_validation.py b/cassandra/cqlengine/tests/columns/test_validation.py similarity index 100% rename from cqlengine/tests/columns/test_validation.py rename to cassandra/cqlengine/tests/columns/test_validation.py diff --git a/cqlengine/tests/columns/test_value_io.py b/cassandra/cqlengine/tests/columns/test_value_io.py similarity index 100% rename from cqlengine/tests/columns/test_value_io.py rename to cassandra/cqlengine/tests/columns/test_value_io.py diff --git a/cqlengine/tests/connections/__init__.py b/cassandra/cqlengine/tests/connections/__init__.py similarity index 100% rename from cqlengine/tests/connections/__init__.py rename to 
cassandra/cqlengine/tests/connections/__init__.py diff --git a/cqlengine/tests/management/__init__.py b/cassandra/cqlengine/tests/management/__init__.py similarity index 100% rename from cqlengine/tests/management/__init__.py rename to cassandra/cqlengine/tests/management/__init__.py diff --git a/cqlengine/tests/management/test_compaction_settings.py b/cassandra/cqlengine/tests/management/test_compaction_settings.py similarity index 100% rename from cqlengine/tests/management/test_compaction_settings.py rename to cassandra/cqlengine/tests/management/test_compaction_settings.py diff --git a/cqlengine/tests/management/test_management.py b/cassandra/cqlengine/tests/management/test_management.py similarity index 100% rename from cqlengine/tests/management/test_management.py rename to cassandra/cqlengine/tests/management/test_management.py diff --git a/cqlengine/tests/model/__init__.py b/cassandra/cqlengine/tests/model/__init__.py similarity index 100% rename from cqlengine/tests/model/__init__.py rename to cassandra/cqlengine/tests/model/__init__.py diff --git a/cqlengine/tests/model/test_class_construction.py b/cassandra/cqlengine/tests/model/test_class_construction.py similarity index 100% rename from cqlengine/tests/model/test_class_construction.py rename to cassandra/cqlengine/tests/model/test_class_construction.py diff --git a/cqlengine/tests/model/test_equality_operations.py b/cassandra/cqlengine/tests/model/test_equality_operations.py similarity index 100% rename from cqlengine/tests/model/test_equality_operations.py rename to cassandra/cqlengine/tests/model/test_equality_operations.py diff --git a/cqlengine/tests/model/test_model.py b/cassandra/cqlengine/tests/model/test_model.py similarity index 100% rename from cqlengine/tests/model/test_model.py rename to cassandra/cqlengine/tests/model/test_model.py diff --git a/cqlengine/tests/model/test_model_io.py b/cassandra/cqlengine/tests/model/test_model_io.py similarity index 100% rename from 
cqlengine/tests/model/test_model_io.py rename to cassandra/cqlengine/tests/model/test_model_io.py diff --git a/cqlengine/tests/model/test_polymorphism.py b/cassandra/cqlengine/tests/model/test_polymorphism.py similarity index 100% rename from cqlengine/tests/model/test_polymorphism.py rename to cassandra/cqlengine/tests/model/test_polymorphism.py diff --git a/cqlengine/tests/model/test_updates.py b/cassandra/cqlengine/tests/model/test_updates.py similarity index 100% rename from cqlengine/tests/model/test_updates.py rename to cassandra/cqlengine/tests/model/test_updates.py diff --git a/cqlengine/tests/model/test_validation.py b/cassandra/cqlengine/tests/model/test_validation.py similarity index 100% rename from cqlengine/tests/model/test_validation.py rename to cassandra/cqlengine/tests/model/test_validation.py diff --git a/cqlengine/tests/model/test_value_lists.py b/cassandra/cqlengine/tests/model/test_value_lists.py similarity index 100% rename from cqlengine/tests/model/test_value_lists.py rename to cassandra/cqlengine/tests/model/test_value_lists.py diff --git a/cqlengine/tests/operators/__init__.py b/cassandra/cqlengine/tests/operators/__init__.py similarity index 100% rename from cqlengine/tests/operators/__init__.py rename to cassandra/cqlengine/tests/operators/__init__.py diff --git a/cqlengine/tests/operators/test_assignment_operators.py b/cassandra/cqlengine/tests/operators/test_assignment_operators.py similarity index 100% rename from cqlengine/tests/operators/test_assignment_operators.py rename to cassandra/cqlengine/tests/operators/test_assignment_operators.py diff --git a/cqlengine/tests/operators/test_base_operator.py b/cassandra/cqlengine/tests/operators/test_base_operator.py similarity index 100% rename from cqlengine/tests/operators/test_base_operator.py rename to cassandra/cqlengine/tests/operators/test_base_operator.py diff --git a/cqlengine/tests/operators/test_where_operators.py b/cassandra/cqlengine/tests/operators/test_where_operators.py 
similarity index 100% rename from cqlengine/tests/operators/test_where_operators.py rename to cassandra/cqlengine/tests/operators/test_where_operators.py diff --git a/cqlengine/tests/query/__init__.py b/cassandra/cqlengine/tests/query/__init__.py similarity index 100% rename from cqlengine/tests/query/__init__.py rename to cassandra/cqlengine/tests/query/__init__.py diff --git a/cqlengine/tests/query/test_batch_query.py b/cassandra/cqlengine/tests/query/test_batch_query.py similarity index 100% rename from cqlengine/tests/query/test_batch_query.py rename to cassandra/cqlengine/tests/query/test_batch_query.py diff --git a/cqlengine/tests/query/test_datetime_queries.py b/cassandra/cqlengine/tests/query/test_datetime_queries.py similarity index 100% rename from cqlengine/tests/query/test_datetime_queries.py rename to cassandra/cqlengine/tests/query/test_datetime_queries.py diff --git a/cqlengine/tests/query/test_named.py b/cassandra/cqlengine/tests/query/test_named.py similarity index 100% rename from cqlengine/tests/query/test_named.py rename to cassandra/cqlengine/tests/query/test_named.py diff --git a/cqlengine/tests/query/test_queryoperators.py b/cassandra/cqlengine/tests/query/test_queryoperators.py similarity index 100% rename from cqlengine/tests/query/test_queryoperators.py rename to cassandra/cqlengine/tests/query/test_queryoperators.py diff --git a/cqlengine/tests/query/test_queryset.py b/cassandra/cqlengine/tests/query/test_queryset.py similarity index 100% rename from cqlengine/tests/query/test_queryset.py rename to cassandra/cqlengine/tests/query/test_queryset.py diff --git a/cqlengine/tests/query/test_updates.py b/cassandra/cqlengine/tests/query/test_updates.py similarity index 100% rename from cqlengine/tests/query/test_updates.py rename to cassandra/cqlengine/tests/query/test_updates.py diff --git a/cqlengine/tests/statements/__init__.py b/cassandra/cqlengine/tests/statements/__init__.py similarity index 100% rename from 
cqlengine/tests/statements/__init__.py rename to cassandra/cqlengine/tests/statements/__init__.py diff --git a/cqlengine/tests/statements/test_assignment_clauses.py b/cassandra/cqlengine/tests/statements/test_assignment_clauses.py similarity index 100% rename from cqlengine/tests/statements/test_assignment_clauses.py rename to cassandra/cqlengine/tests/statements/test_assignment_clauses.py diff --git a/cqlengine/tests/statements/test_assignment_statement.py b/cassandra/cqlengine/tests/statements/test_assignment_statement.py similarity index 100% rename from cqlengine/tests/statements/test_assignment_statement.py rename to cassandra/cqlengine/tests/statements/test_assignment_statement.py diff --git a/cqlengine/tests/statements/test_base_clause.py b/cassandra/cqlengine/tests/statements/test_base_clause.py similarity index 100% rename from cqlengine/tests/statements/test_base_clause.py rename to cassandra/cqlengine/tests/statements/test_base_clause.py diff --git a/cqlengine/tests/statements/test_base_statement.py b/cassandra/cqlengine/tests/statements/test_base_statement.py similarity index 100% rename from cqlengine/tests/statements/test_base_statement.py rename to cassandra/cqlengine/tests/statements/test_base_statement.py diff --git a/cqlengine/tests/statements/test_delete_statement.py b/cassandra/cqlengine/tests/statements/test_delete_statement.py similarity index 100% rename from cqlengine/tests/statements/test_delete_statement.py rename to cassandra/cqlengine/tests/statements/test_delete_statement.py diff --git a/cqlengine/tests/statements/test_insert_statement.py b/cassandra/cqlengine/tests/statements/test_insert_statement.py similarity index 100% rename from cqlengine/tests/statements/test_insert_statement.py rename to cassandra/cqlengine/tests/statements/test_insert_statement.py diff --git a/cqlengine/tests/statements/test_quoter.py b/cassandra/cqlengine/tests/statements/test_quoter.py similarity index 100% rename from 
cqlengine/tests/statements/test_quoter.py rename to cassandra/cqlengine/tests/statements/test_quoter.py diff --git a/cqlengine/tests/statements/test_select_statement.py b/cassandra/cqlengine/tests/statements/test_select_statement.py similarity index 100% rename from cqlengine/tests/statements/test_select_statement.py rename to cassandra/cqlengine/tests/statements/test_select_statement.py diff --git a/cqlengine/tests/statements/test_update_statement.py b/cassandra/cqlengine/tests/statements/test_update_statement.py similarity index 100% rename from cqlengine/tests/statements/test_update_statement.py rename to cassandra/cqlengine/tests/statements/test_update_statement.py diff --git a/cqlengine/tests/statements/test_where_clause.py b/cassandra/cqlengine/tests/statements/test_where_clause.py similarity index 100% rename from cqlengine/tests/statements/test_where_clause.py rename to cassandra/cqlengine/tests/statements/test_where_clause.py diff --git a/cqlengine/tests/test_batch_query.py b/cassandra/cqlengine/tests/test_batch_query.py similarity index 100% rename from cqlengine/tests/test_batch_query.py rename to cassandra/cqlengine/tests/test_batch_query.py diff --git a/cqlengine/tests/test_consistency.py b/cassandra/cqlengine/tests/test_consistency.py similarity index 100% rename from cqlengine/tests/test_consistency.py rename to cassandra/cqlengine/tests/test_consistency.py diff --git a/cqlengine/tests/test_ifnotexists.py b/cassandra/cqlengine/tests/test_ifnotexists.py similarity index 100% rename from cqlengine/tests/test_ifnotexists.py rename to cassandra/cqlengine/tests/test_ifnotexists.py diff --git a/cqlengine/tests/test_load.py b/cassandra/cqlengine/tests/test_load.py similarity index 100% rename from cqlengine/tests/test_load.py rename to cassandra/cqlengine/tests/test_load.py diff --git a/cqlengine/tests/test_timestamp.py b/cassandra/cqlengine/tests/test_timestamp.py similarity index 100% rename from cqlengine/tests/test_timestamp.py rename to 
cassandra/cqlengine/tests/test_timestamp.py diff --git a/cqlengine/tests/test_transaction.py b/cassandra/cqlengine/tests/test_transaction.py similarity index 100% rename from cqlengine/tests/test_transaction.py rename to cassandra/cqlengine/tests/test_transaction.py diff --git a/cqlengine/tests/test_ttl.py b/cassandra/cqlengine/tests/test_ttl.py similarity index 100% rename from cqlengine/tests/test_ttl.py rename to cassandra/cqlengine/tests/test_ttl.py diff --git a/cqlengine/VERSION b/cqlengine/VERSION deleted file mode 100644 index 885415662f..0000000000 --- a/cqlengine/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.21.0 diff --git a/cqlengine/__init__.py b/cqlengine/__init__.py deleted file mode 100644 index 0961afd333..0000000000 --- a/cqlengine/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -import pkg_resources - -from cassandra import ConsistencyLevel - -from cqlengine.columns import * -from cqlengine.functions import * -from cqlengine.models import Model -from cqlengine.query import BatchQuery - - -__cqlengine_version_path__ = pkg_resources.resource_filename('cqlengine', - 'VERSION') -__version__ = open(__cqlengine_version_path__, 'r').readline().strip() - -# compaction -SizeTieredCompactionStrategy = "SizeTieredCompactionStrategy" -LeveledCompactionStrategy = "LeveledCompactionStrategy" - -# Caching constants. 
-CACHING_ALL = "ALL" -CACHING_KEYS_ONLY = "KEYS_ONLY" -CACHING_ROWS_ONLY = "ROWS_ONLY" -CACHING_NONE = "NONE" - -ANY = ConsistencyLevel.ANY -ONE = ConsistencyLevel.ONE -TWO = ConsistencyLevel.TWO -THREE = ConsistencyLevel.THREE -QUORUM = ConsistencyLevel.QUORUM -LOCAL_ONE = ConsistencyLevel.LOCAL_ONE -LOCAL_QUORUM = ConsistencyLevel.LOCAL_QUORUM -EACH_QUORUM = ConsistencyLevel.EACH_QUORUM -ALL = ConsistencyLevel.ALL From 46985097277fd491c30f833bc4024b7deb9dac01 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 4 Feb 2015 11:56:19 -0600 Subject: [PATCH 1199/3726] Reloate cqlengine tests to integration tests, update imports Not integrated with existing integration ccm operation. There is a handful of failing cqlengine tests in the commit, that worked before the move. --- cassandra/cqlengine/tests/__init__.py | 30 ------------ tests/integration/cqlengine/__init__.py | 13 ++++++ .../integration/cqlengine}/base.py | 12 +---- .../cqlengine}/columns/__init__.py | 0 .../columns/test_container_columns.py | 8 ++-- .../cqlengine}/columns/test_counter_column.py | 9 ++-- .../cqlengine}/columns/test_static_column.py | 22 ++++----- .../cqlengine}/columns/test_validation.py | 46 ++++++++----------- .../cqlengine}/columns/test_value_io.py | 12 ++--- .../cqlengine}/connections/__init__.py | 0 .../cqlengine}/management/__init__.py | 0 .../management/test_compaction_settings.py | 20 ++++---- .../cqlengine}/management/test_management.py | 39 ++++++++-------- .../integration/cqlengine}/model/__init__.py | 0 .../model/test_class_construction.py | 30 ++++++------ .../model/test_equality_operations.py | 10 ++-- .../cqlengine}/model/test_model.py | 4 +- .../cqlengine}/model/test_model_io.py | 12 ++--- .../cqlengine}/model/test_polymorphism.py | 10 ++-- .../cqlengine}/model/test_updates.py | 10 ++-- .../cqlengine}/model/test_validation.py | 0 .../cqlengine}/model/test_value_lists.py | 10 ++-- .../cqlengine}/operators/__init__.py | 0 .../operators/test_assignment_operators.py | 0 
.../operators/test_base_operator.py | 4 +- .../operators/test_where_operators.py | 2 +- .../integration/cqlengine}/query/__init__.py | 0 .../cqlengine}/query/test_batch_query.py | 18 ++++---- .../cqlengine}/query/test_datetime_queries.py | 16 +++---- .../cqlengine}/query/test_named.py | 15 +++--- .../cqlengine}/query/test_queryoperators.py | 21 +++++---- .../cqlengine}/query/test_queryset.py | 22 ++++----- .../cqlengine}/query/test_updates.py | 12 ++--- .../cqlengine}/statements/__init__.py | 0 .../statements/test_assignment_clauses.py | 2 +- .../statements/test_assignment_statement.py | 4 +- .../cqlengine}/statements/test_base_clause.py | 2 +- .../statements/test_base_statement.py | 2 +- .../statements/test_delete_statement.py | 4 +- .../statements/test_insert_statement.py | 2 +- .../cqlengine}/statements/test_quoter.py | 0 .../statements/test_select_statement.py | 4 +- .../statements/test_update_statement.py | 6 +-- .../statements/test_where_clause.py | 4 +- .../cqlengine}/test_batch_query.py | 14 ++---- .../cqlengine}/test_consistency.py | 39 ++++++++-------- .../cqlengine}/test_ifnotexists.py | 16 ++++--- .../integration/cqlengine}/test_load.py | 17 ++++--- .../integration/cqlengine}/test_timestamp.py | 15 +++--- .../cqlengine}/test_transaction.py | 25 +++++----- .../integration/cqlengine}/test_ttl.py | 10 ++-- 51 files changed, 269 insertions(+), 304 deletions(-) delete mode 100644 cassandra/cqlengine/tests/__init__.py create mode 100644 tests/integration/cqlengine/__init__.py rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/base.py (70%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/columns/__init__.py (100%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/columns/test_container_columns.py (98%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/columns/test_counter_column.py (92%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/columns/test_static_column.py (77%) rename 
{cassandra/cqlengine/tests => tests/integration/cqlengine}/columns/test_validation.py (92%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/columns/test_value_io.py (93%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/connections/__init__.py (100%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/management/__init__.py (100%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/management/test_compaction_settings.py (92%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/management/test_management.py (89%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/model/__init__.py (100%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/model/test_class_construction.py (93%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/model/test_equality_operations.py (84%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/model/test_model.py (93%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/model/test_model_io.py (96%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/model/test_polymorphism.py (96%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/model/test_updates.py (91%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/model/test_validation.py (100%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/model/test_value_lists.py (86%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/operators/__init__.py (100%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/operators/test_assignment_operators.py (100%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/operators/test_base_operator.py (58%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/operators/test_where_operators.py (96%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/query/__init__.py (100%) rename 
{cassandra/cqlengine/tests => tests/integration/cqlengine}/query/test_batch_query.py (94%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/query/test_datetime_queries.py (81%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/query/test_named.py (95%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/query/test_queryoperators.py (86%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/query/test_queryset.py (98%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/query/test_updates.py (96%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/statements/__init__.py (100%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/statements/test_assignment_clauses.py (98%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/statements/test_assignment_statement.py (69%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/statements/test_base_clause.py (84%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/statements/test_base_statement.py (80%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/statements/test_delete_statement.py (94%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/statements/test_insert_statement.py (93%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/statements/test_quoter.py (100%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/statements/test_select_statement.py (96%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/statements/test_update_statement.py (95%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/statements/test_where_clause.py (86%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/test_batch_query.py (95%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/test_consistency.py (67%) rename {cassandra/cqlengine/tests => 
tests/integration/cqlengine}/test_ifnotexists.py (94%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/test_load.py (70%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/test_timestamp.py (95%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/test_transaction.py (85%) rename {cassandra/cqlengine/tests => tests/integration/cqlengine}/test_ttl.py (94%) diff --git a/cassandra/cqlengine/tests/__init__.py b/cassandra/cqlengine/tests/__init__.py deleted file mode 100644 index e03842d32b..0000000000 --- a/cassandra/cqlengine/tests/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -import os - - -from cqlengine import connection -from cqlengine.management import create_keyspace - - -def setup_package(): - try: - CASSANDRA_VERSION = int(os.environ["CASSANDRA_VERSION"]) - except: - print("CASSANDRA_VERSION must be set as an environment variable. " - "One of (12, 20, 21)") - raise - - if os.environ.get('CASSANDRA_TEST_HOST'): - CASSANDRA_TEST_HOST = os.environ['CASSANDRA_TEST_HOST'] - else: - CASSANDRA_TEST_HOST = 'localhost' - - if CASSANDRA_VERSION < 20: - protocol_version = 1 - else: - protocol_version = 2 - - connection.setup([CASSANDRA_TEST_HOST], - protocol_version=protocol_version, - default_keyspace='cqlengine_test') - - create_keyspace("cqlengine_test", replication_factor=1, strategy_class="SimpleStrategy") diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py new file mode 100644 index 0000000000..8fa5480e22 --- /dev/null +++ b/tests/integration/cqlengine/__init__.py @@ -0,0 +1,13 @@ +from cassandra.cqlengine import connection +from cassandra.cqlengine.management import create_keyspace + +from tests.integration import PROTOCOL_VERSION + + +def setup_package(): + keyspace = 'cqlengine_test' + connection.setup(['localhost'], + protocol_version=PROTOCOL_VERSION, + default_keyspace=keyspace) + + create_keyspace(keyspace, replication_factor=1, strategy_class="SimpleStrategy") diff 
--git a/cassandra/cqlengine/tests/base.py b/tests/integration/cqlengine/base.py similarity index 70% rename from cassandra/cqlengine/tests/base.py rename to tests/integration/cqlengine/base.py index 5bd66b648c..ea3f0b5989 100644 --- a/cassandra/cqlengine/tests/base.py +++ b/tests/integration/cqlengine/base.py @@ -1,19 +1,11 @@ -from unittest import TestCase -import os import sys -import six -from cqlengine.connection import get_session - +from unittest import TestCase -CASSANDRA_VERSION = int(os.environ['CASSANDRA_VERSION']) -PROTOCOL_VERSION = 1 if CASSANDRA_VERSION < 20 else 2 +from cassandra.cqlengine.connection import get_session class BaseCassEngTestCase(TestCase): - # @classmethod - # def setUpClass(cls): - # super(BaseCassEngTestCase, cls).setUpClass() session = None def setUp(self): diff --git a/cassandra/cqlengine/tests/columns/__init__.py b/tests/integration/cqlengine/columns/__init__.py similarity index 100% rename from cassandra/cqlengine/tests/columns/__init__.py rename to tests/integration/cqlengine/columns/__init__.py diff --git a/cassandra/cqlengine/tests/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py similarity index 98% rename from cassandra/cqlengine/tests/columns/test_container_columns.py rename to tests/integration/cqlengine/columns/test_container_columns.py index f8c0db595b..4de46c1518 100644 --- a/cassandra/cqlengine/tests/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -3,10 +3,10 @@ from uuid import uuid4 import six -from cqlengine import Model, ValidationError -from cqlengine import columns -from cqlengine.management import sync_table, drop_table -from cqlengine.tests.base import BaseCassEngTestCase +from cassandra.cqlengine.models import Model, ValidationError +import cassandra.cqlengine.columns as columns +from cassandra.cqlengine.management import sync_table, drop_table +from tests.integration.cqlengine.base import 
BaseCassEngTestCase class TestSetModel(Model): diff --git a/cassandra/cqlengine/tests/columns/test_counter_column.py b/tests/integration/cqlengine/columns/test_counter_column.py similarity index 92% rename from cassandra/cqlengine/tests/columns/test_counter_column.py rename to tests/integration/cqlengine/columns/test_counter_column.py index 964cacb8db..074f7e52f0 100644 --- a/cassandra/cqlengine/tests/columns/test_counter_column.py +++ b/tests/integration/cqlengine/columns/test_counter_column.py @@ -1,10 +1,9 @@ from uuid import uuid4 -from cqlengine import Model -from cqlengine import columns -from cqlengine.management import sync_table, drop_table -from cqlengine.models import ModelDefinitionException -from cqlengine.tests.base import BaseCassEngTestCase +from cassandra.cqlengine import columns +from cassandra.cqlengine.management import sync_table, drop_table +from cassandra.cqlengine.models import Model, ModelDefinitionException +from tests.integration.cqlengine.base import BaseCassEngTestCase class TestCounterModel(Model): diff --git a/cassandra/cqlengine/tests/columns/test_static_column.py b/tests/integration/cqlengine/columns/test_static_column.py similarity index 77% rename from cassandra/cqlengine/tests/columns/test_static_column.py rename to tests/integration/cqlengine/columns/test_static_column.py index d7e6dc9b81..d3f537f2cf 100644 --- a/cassandra/cqlengine/tests/columns/test_static_column.py +++ b/tests/integration/cqlengine/columns/test_static_column.py @@ -1,11 +1,12 @@ -from uuid import uuid4 from unittest import skipUnless -from cqlengine import Model -from cqlengine import columns -from cqlengine.management import sync_table, drop_table -from cqlengine.models import ModelDefinitionException -from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.tests.base import CASSANDRA_VERSION, PROTOCOL_VERSION +from uuid import uuid4 + +from cassandra.cqlengine import columns +from cassandra.cqlengine.management import sync_table, drop_table 
+from cassandra.cqlengine.models import Model + +from tests.integration.cqlengine.base import BaseCassEngTestCase +from tests.integration import PROTOCOL_VERSION class TestStaticModel(Model): @@ -15,21 +16,20 @@ class TestStaticModel(Model): text = columns.Text() +@skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") class TestStaticColumn(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(TestStaticColumn, cls).setUpClass() drop_table(TestStaticModel) - if CASSANDRA_VERSION >= 20: - sync_table(TestStaticModel) + sync_table(TestStaticModel) @classmethod def tearDownClass(cls): super(TestStaticColumn, cls).tearDownClass() drop_table(TestStaticModel) - @skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_mixed_updates(self): """ Tests that updates on both static and non-static columns work as intended """ instance = TestStaticModel.create() @@ -45,7 +45,6 @@ def test_mixed_updates(self): assert actual.static == "it's still shared" - @skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_static_only_updates(self): """ Tests that updates on static only column work as intended """ instance = TestStaticModel.create() @@ -59,7 +58,6 @@ def test_static_only_updates(self): actual = TestStaticModel.get(partition=u.partition) assert actual.static == "it's still shared" - @skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_static_with_null_cluster_key(self): """ Tests that save/update/delete works for static column works when clustering key is null""" instance = TestStaticModel.create(cluster=None, static = "it's shared") diff --git a/cassandra/cqlengine/tests/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py similarity index 92% rename from cassandra/cqlengine/tests/columns/test_validation.py rename to tests/integration/cqlengine/columns/test_validation.py index a4a939e854..d1dd90cc76 100644 --- 
a/cassandra/cqlengine/tests/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -1,34 +1,26 @@ -#tests the behavior of the column classes -from datetime import datetime, timedelta -from datetime import date -from datetime import tzinfo +from datetime import datetime, timedelta, date, tzinfo from decimal import Decimal as D from unittest import TestCase from uuid import uuid4, uuid1 + from cassandra import InvalidRequest -import six -from cqlengine import ValidationError -from cqlengine.connection import execute - -from cqlengine.tests.base import BaseCassEngTestCase - -from cqlengine.columns import Column, TimeUUID -from cqlengine.columns import Bytes -from cqlengine.columns import Ascii -from cqlengine.columns import Text -from cqlengine.columns import Integer -from cqlengine.columns import BigInt -from cqlengine.columns import VarInt -from cqlengine.columns import DateTime -from cqlengine.columns import Date -from cqlengine.columns import UUID -from cqlengine.columns import Boolean -from cqlengine.columns import Float -from cqlengine.columns import Decimal -from cqlengine.columns import Inet - -from cqlengine.management import sync_table, drop_table -from cqlengine.models import Model +from cassandra.cqlengine.management import sync_table, drop_table +from cassandra.cqlengine.models import Model, ValidationError +from cassandra.cqlengine.connection import execute + +from cassandra.cqlengine.columns import TimeUUID +from cassandra.cqlengine.columns import Text +from cassandra.cqlengine.columns import Integer +from cassandra.cqlengine.columns import BigInt +from cassandra.cqlengine.columns import VarInt +from cassandra.cqlengine.columns import DateTime +from cassandra.cqlengine.columns import Date +from cassandra.cqlengine.columns import UUID +from cassandra.cqlengine.columns import Boolean +from cassandra.cqlengine.columns import Decimal +from cassandra.cqlengine.columns import Inet + +from tests.integration.cqlengine.base 
import BaseCassEngTestCase class TestDatetime(BaseCassEngTestCase): diff --git a/cassandra/cqlengine/tests/columns/test_value_io.py b/tests/integration/cqlengine/columns/test_value_io.py similarity index 93% rename from cassandra/cqlengine/tests/columns/test_value_io.py rename to tests/integration/cqlengine/columns/test_value_io.py index d3b589f029..16e34bf2cc 100644 --- a/cassandra/cqlengine/tests/columns/test_value_io.py +++ b/tests/integration/cqlengine/columns/test_value_io.py @@ -3,13 +3,13 @@ from uuid import uuid1, uuid4, UUID import six -from cqlengine.tests.base import BaseCassEngTestCase +from tests.integration.cqlengine.base import BaseCassEngTestCase -from cqlengine.management import sync_table -from cqlengine.management import drop_table -from cqlengine.models import Model -from cqlengine.columns import ValueQuoter -from cqlengine import columns +from cassandra.cqlengine.management import sync_table +from cassandra.cqlengine.management import drop_table +from cassandra.cqlengine.models import Model +from cassandra.cqlengine.columns import ValueQuoter +from cassandra.cqlengine import columns import unittest diff --git a/cassandra/cqlengine/tests/connections/__init__.py b/tests/integration/cqlengine/connections/__init__.py similarity index 100% rename from cassandra/cqlengine/tests/connections/__init__.py rename to tests/integration/cqlengine/connections/__init__.py diff --git a/cassandra/cqlengine/tests/management/__init__.py b/tests/integration/cqlengine/management/__init__.py similarity index 100% rename from cassandra/cqlengine/tests/management/__init__.py rename to tests/integration/cqlengine/management/__init__.py diff --git a/cassandra/cqlengine/tests/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py similarity index 92% rename from cassandra/cqlengine/tests/management/test_compaction_settings.py rename to tests/integration/cqlengine/management/test_compaction_settings.py index 
9607334e7d..5358a4aa6b 100644 --- a/cassandra/cqlengine/tests/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -1,11 +1,13 @@ import copy import json -from time import sleep -from mock import patch, MagicMock -from cqlengine import Model, columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy -from cqlengine.exceptions import CQLEngineException -from cqlengine.management import get_compaction_options, drop_table, sync_table, get_table_settings -from cqlengine.tests.base import BaseCassEngTestCase +from mock import patch + +from cassandra.cqlengine import columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy +from cassandra.cqlengine.exceptions import CQLEngineException +from cassandra.cqlengine.management import get_compaction_options, drop_table, sync_table, get_table_settings +from cassandra.cqlengine.models import Model + +from tests.integration.cqlengine.base import BaseCassEngTestCase class CompactionModel(Model): @@ -81,7 +83,7 @@ class LeveledcompactionTestTable(Model): user_id = columns.UUID(primary_key=True) name = columns.Text() -from cqlengine.management import schema_columnfamilies +from cassandra.cqlengine.management import schema_columnfamilies class AlterTableTest(BaseCassEngTestCase): @@ -93,7 +95,7 @@ def test_alter_is_called_table(self): assert mock.called == 1 def test_compaction_not_altered_without_changes_leveled(self): - from cqlengine.management import update_compaction + from cassandra.cqlengine.management import update_compaction class LeveledCompactionChangesDetectionTest(Model): @@ -110,7 +112,7 @@ class LeveledCompactionChangesDetectionTest(Model): assert not update_compaction(LeveledCompactionChangesDetectionTest) def test_compaction_not_altered_without_changes_sizetiered(self): - from cqlengine.management import update_compaction + from cassandra.cqlengine.management import update_compaction class SizeTieredCompactionChangesDetectionTest(Model): diff 
--git a/cassandra/cqlengine/tests/management/test_management.py b/tests/integration/cqlengine/management/test_management.py similarity index 89% rename from cassandra/cqlengine/tests/management/test_management.py rename to tests/integration/cqlengine/management/test_management.py index 5dae9ff18f..3359547a07 100644 --- a/cassandra/cqlengine/tests/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -1,16 +1,17 @@ import mock -from cqlengine import ALL, CACHING_ALL, CACHING_NONE -from cqlengine.connection import get_session -from cqlengine.exceptions import CQLEngineException -from cqlengine.management import get_fields, sync_table, drop_table -from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.tests.base import CASSANDRA_VERSION, PROTOCOL_VERSION -from cqlengine import management -from cqlengine.tests.query.test_queryset import TestModel -from cqlengine.models import Model -from cqlengine import columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy from unittest import skipUnless +from cassandra.cqlengine import CACHING_ALL, CACHING_NONE +from cassandra.cqlengine.connection import get_session +from cassandra.cqlengine.exceptions import CQLEngineException +from cassandra.cqlengine import management +from cassandra.cqlengine.management import get_fields, sync_table, drop_table +from cassandra.cqlengine.models import Model +from cassandra.cqlengine import columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy + +from tests.integration import CASSANDRA_VERSION, PROTOCOL_VERSION +from tests.integration.cqlengine.base import BaseCassEngTestCase +from tests.integration.cqlengine.query.test_queryset import TestModel class CreateKeyspaceTest(BaseCassEngTestCase): def test_create_succeeeds(self): @@ -132,7 +133,7 @@ class ModelWithTableProperties(Model): key = columns.UUID(primary_key=True) # kind of a hack, but we only test this property on C >= 2.0 -if CASSANDRA_VERSION >= 20: +if 
CASSANDRA_VERSION >= '2.0.0': ModelWithTableProperties.__memtable_flush_period_in_ms__ = 43681 ModelWithTableProperties.__index_interval__ = 98706 ModelWithTableProperties.__default_time_to_live__ = 4756 @@ -155,15 +156,15 @@ def test_set_table_properties(self): # TODO: due to a bug in the native driver i'm not seeing the local read repair chance show up # 'local_read_repair_chance': 0.50811, } - if CASSANDRA_VERSION <= 20: + if CASSANDRA_VERSION >= '2.0.0': expected['caching'] = CACHING_ALL expected['replicate_on_write'] = False - if CASSANDRA_VERSION == 20: + if CASSANDRA_VERSION >= '2.0.0': expected['populate_io_cache_on_flush'] = True expected['index_interval'] = 98706 - if CASSANDRA_VERSION >= 20: + if CASSANDRA_VERSION >= '2.0.0': expected['default_time_to_live'] = 4756 expected['memtable_flush_period_in_ms'] = 43681 @@ -180,7 +181,7 @@ def test_table_property_update(self): ModelWithTableProperties.__replicate_on_write__ = True ModelWithTableProperties.__dclocal_read_repair_chance__ = 0.12732 - if CASSANDRA_VERSION >= 20: + if CASSANDRA_VERSION >= '2.0.0': ModelWithTableProperties.__default_time_to_live__ = 65178 ModelWithTableProperties.__memtable_flush_period_in_ms__ = 60210 ModelWithTableProperties.__index_interval__ = 94207 @@ -195,15 +196,15 @@ def test_table_property_update(self): 'read_repair_chance': 0.2989, #'local_read_repair_chance': 0.12732, } - if CASSANDRA_VERSION >= 20: + if CASSANDRA_VERSION >= '2.0.0': expected['memtable_flush_period_in_ms'] = 60210 expected['default_time_to_live'] = 65178 - if CASSANDRA_VERSION == 20: + if CASSANDRA_VERSION >= '2.0.0': expected['index_interval'] = 94207 # these featuers removed in cassandra 2.1 - if CASSANDRA_VERSION <= 20: + if CASSANDRA_VERSION >= '2.0.0': expected['caching'] = CACHING_NONE expected['replicate_on_write'] = True expected['populate_io_cache_on_flush'] = False @@ -267,7 +268,7 @@ class StaticModel(Model): from mock import patch - from cqlengine.connection import get_session + from 
cassandra.cqlengine.connection import get_session session = get_session() with patch.object(session, "execute", wraps=session.execute) as m: diff --git a/cassandra/cqlengine/tests/model/__init__.py b/tests/integration/cqlengine/model/__init__.py similarity index 100% rename from cassandra/cqlengine/tests/model/__init__.py rename to tests/integration/cqlengine/model/__init__.py diff --git a/cassandra/cqlengine/tests/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py similarity index 93% rename from cassandra/cqlengine/tests/model/test_class_construction.py rename to tests/integration/cqlengine/model/test_class_construction.py index 7616d79c2a..977240b187 100644 --- a/cassandra/cqlengine/tests/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -1,12 +1,12 @@ from uuid import uuid4 import warnings -from cqlengine.query import QueryException, ModelQuerySet, DMLQuery -from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.exceptions import ModelException, CQLEngineException -from cqlengine.models import Model, ModelDefinitionException, ColumnQueryEvaluator, UndefinedKeyspaceWarning -from cqlengine import columns -import cqlengine +from cassandra.cqlengine import columns +from cassandra.cqlengine.exceptions import ModelException, CQLEngineException +from cassandra.cqlengine.models import Model, ModelDefinitionException, ColumnQueryEvaluator +from cassandra.cqlengine.query import ModelQuerySet, DMLQuery + +from tests.integration.cqlengine.base import BaseCassEngTestCase class TestModelClassFunction(BaseCassEngTestCase): """ @@ -152,12 +152,12 @@ def test_partition_keys(self): """ Test compound partition key definition """ - class ModelWithPartitionKeys(cqlengine.Model): + class ModelWithPartitionKeys(Model): id = columns.UUID(primary_key=True, default=lambda:uuid4()) - c1 = cqlengine.Text(primary_key=True) - p1 = cqlengine.Text(partition_key=True) - p2 = 
cqlengine.Text(partition_key=True) + c1 = columns.Text(primary_key=True) + p1 = columns.Text(partition_key=True) + p2 = columns.Text(partition_key=True) cols = ModelWithPartitionKeys._columns @@ -231,12 +231,12 @@ class NoKeyspace(Model): class TestManualTableNaming(BaseCassEngTestCase): - class RenamedTest(cqlengine.Model): + class RenamedTest(Model): __keyspace__ = 'whatever' __table_name__ = 'manual_name' - id = cqlengine.UUID(primary_key=True) - data = cqlengine.Text() + id = columns.UUID(primary_key=True) + data = columns.Text() def test_proper_table_naming(self): assert self.RenamedTest.column_family_name(include_keyspace=False) == 'manual_name' @@ -286,7 +286,7 @@ def test_attempting_to_save_abstract_model_fails(self): def test_attempting_to_create_abstract_table_fails(self): """ Attempting to create a table from an abstract model should fail """ - from cqlengine.management import sync_table + from cassandra.cqlengine.management import sync_table with self.assertRaises(CQLEngineException): sync_table(AbstractModelWithFullCols) @@ -303,7 +303,7 @@ def test_abstract_columns_are_inherited(self): def test_concrete_class_table_creation_cycle(self): """ Tests that models with inherited abstract classes can be created, and have io performed """ - from cqlengine.management import sync_table, drop_table + from cassandra.cqlengine.management import sync_table, drop_table sync_table(ConcreteModelWithCol) w1 = ConcreteModelWithCol.create(pkey=5, data=6) diff --git a/cassandra/cqlengine/tests/model/test_equality_operations.py b/tests/integration/cqlengine/model/test_equality_operations.py similarity index 84% rename from cassandra/cqlengine/tests/model/test_equality_operations.py rename to tests/integration/cqlengine/model/test_equality_operations.py index 84855ad840..b5e89ce154 100644 --- a/cassandra/cqlengine/tests/model/test_equality_operations.py +++ b/tests/integration/cqlengine/model/test_equality_operations.py @@ -1,11 +1,11 @@ from unittest import skip from uuid 
import uuid4 -from cqlengine.tests.base import BaseCassEngTestCase +from tests.integration.cqlengine.base import BaseCassEngTestCase -from cqlengine.management import sync_table -from cqlengine.management import drop_table -from cqlengine.models import Model -from cqlengine import columns +from cassandra.cqlengine.management import sync_table +from cassandra.cqlengine.management import drop_table +from cassandra.cqlengine.models import Model +from cassandra.cqlengine import columns class TestModel(Model): diff --git a/cassandra/cqlengine/tests/model/test_model.py b/tests/integration/cqlengine/model/test_model.py similarity index 93% rename from cassandra/cqlengine/tests/model/test_model.py rename to tests/integration/cqlengine/model/test_model.py index 592ff4b86a..4b11ebb34c 100644 --- a/cassandra/cqlengine/tests/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -1,7 +1,7 @@ from unittest import TestCase -from cqlengine.models import Model, ModelDefinitionException -from cqlengine import columns +from cassandra.cqlengine.models import Model, ModelDefinitionException +from cassandra.cqlengine import columns class TestModel(TestCase): diff --git a/cassandra/cqlengine/tests/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py similarity index 96% rename from cassandra/cqlengine/tests/model/test_model_io.py rename to tests/integration/cqlengine/model/test_model_io.py index e15c5c43df..51cac3c55d 100644 --- a/cassandra/cqlengine/tests/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -2,13 +2,13 @@ import random from datetime import date from operator import itemgetter -from cqlengine.exceptions import CQLEngineException -from cqlengine.tests.base import BaseCassEngTestCase +from cassandra.cqlengine.exceptions import CQLEngineException +from tests.integration.cqlengine.base import BaseCassEngTestCase -from cqlengine.management import sync_table -from cqlengine.management import drop_table 
-from cqlengine.models import Model -from cqlengine import columns +from cassandra.cqlengine.management import sync_table +from cassandra.cqlengine.management import drop_table +from cassandra.cqlengine.models import Model +from cassandra.cqlengine import columns class TestModel(Model): diff --git a/cassandra/cqlengine/tests/model/test_polymorphism.py b/tests/integration/cqlengine/model/test_polymorphism.py similarity index 96% rename from cassandra/cqlengine/tests/model/test_polymorphism.py rename to tests/integration/cqlengine/model/test_polymorphism.py index 426269095d..c569d48d35 100644 --- a/cassandra/cqlengine/tests/model/test_polymorphism.py +++ b/tests/integration/cqlengine/model/test_polymorphism.py @@ -1,11 +1,11 @@ import uuid import mock -from cqlengine import columns -from cqlengine import models -from cqlengine.connection import get_session -from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine import management +from cassandra.cqlengine import columns +from cassandra.cqlengine import models +from cassandra.cqlengine.connection import get_session +from tests.integration.cqlengine.base import BaseCassEngTestCase +from cassandra.cqlengine import management class TestPolymorphicClassConstruction(BaseCassEngTestCase): diff --git a/cassandra/cqlengine/tests/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py similarity index 91% rename from cassandra/cqlengine/tests/model/test_updates.py rename to tests/integration/cqlengine/model/test_updates.py index 77f64f7289..eecad304bc 100644 --- a/cassandra/cqlengine/tests/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -1,12 +1,12 @@ from uuid import uuid4 from mock import patch -from cqlengine.exceptions import ValidationError +from cassandra.cqlengine.exceptions import ValidationError -from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.models import Model -from cqlengine import columns -from cqlengine.management import 
sync_table, drop_table +from tests.integration.cqlengine.base import BaseCassEngTestCase +from cassandra.cqlengine.models import Model +from cassandra.cqlengine import columns +from cassandra.cqlengine.management import sync_table, drop_table class TestUpdateModel(Model): diff --git a/cassandra/cqlengine/tests/model/test_validation.py b/tests/integration/cqlengine/model/test_validation.py similarity index 100% rename from cassandra/cqlengine/tests/model/test_validation.py rename to tests/integration/cqlengine/model/test_validation.py diff --git a/cassandra/cqlengine/tests/model/test_value_lists.py b/tests/integration/cqlengine/model/test_value_lists.py similarity index 86% rename from cassandra/cqlengine/tests/model/test_value_lists.py rename to tests/integration/cqlengine/model/test_value_lists.py index 1b17d784df..893b7d964f 100644 --- a/cassandra/cqlengine/tests/model/test_value_lists.py +++ b/tests/integration/cqlengine/model/test_value_lists.py @@ -1,10 +1,10 @@ import random -from cqlengine.tests.base import BaseCassEngTestCase +from tests.integration.cqlengine.base import BaseCassEngTestCase -from cqlengine.management import sync_table -from cqlengine.management import drop_table -from cqlengine.models import Model -from cqlengine import columns +from cassandra.cqlengine.management import sync_table +from cassandra.cqlengine.management import drop_table +from cassandra.cqlengine.models import Model +from cassandra.cqlengine import columns class TestModel(Model): diff --git a/cassandra/cqlengine/tests/operators/__init__.py b/tests/integration/cqlengine/operators/__init__.py similarity index 100% rename from cassandra/cqlengine/tests/operators/__init__.py rename to tests/integration/cqlengine/operators/__init__.py diff --git a/cassandra/cqlengine/tests/operators/test_assignment_operators.py b/tests/integration/cqlengine/operators/test_assignment_operators.py similarity index 100% rename from cassandra/cqlengine/tests/operators/test_assignment_operators.py 
rename to tests/integration/cqlengine/operators/test_assignment_operators.py diff --git a/cassandra/cqlengine/tests/operators/test_base_operator.py b/tests/integration/cqlengine/operators/test_base_operator.py similarity index 58% rename from cassandra/cqlengine/tests/operators/test_base_operator.py rename to tests/integration/cqlengine/operators/test_base_operator.py index af13fdb115..cb92935093 100644 --- a/cassandra/cqlengine/tests/operators/test_base_operator.py +++ b/tests/integration/cqlengine/operators/test_base_operator.py @@ -1,9 +1,9 @@ from unittest import TestCase -from cqlengine.operators import BaseQueryOperator, QueryOperatorException +from cassandra.cqlengine.operators import BaseQueryOperator, QueryOperatorException class BaseOperatorTest(TestCase): def test_get_operator_cannot_be_called_from_base_class(self): with self.assertRaises(QueryOperatorException): - BaseQueryOperator.get_operator('*') \ No newline at end of file + BaseQueryOperator.get_operator('*') diff --git a/cassandra/cqlengine/tests/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py similarity index 96% rename from cassandra/cqlengine/tests/operators/test_where_operators.py rename to tests/integration/cqlengine/operators/test_where_operators.py index a73454c660..5a241effcc 100644 --- a/cassandra/cqlengine/tests/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -1,5 +1,5 @@ from unittest import TestCase -from cqlengine.operators import * +from cassandra.cqlengine.operators import * import six diff --git a/cassandra/cqlengine/tests/query/__init__.py b/tests/integration/cqlengine/query/__init__.py similarity index 100% rename from cassandra/cqlengine/tests/query/__init__.py rename to tests/integration/cqlengine/query/__init__.py diff --git a/cassandra/cqlengine/tests/query/test_batch_query.py b/tests/integration/cqlengine/query/test_batch_query.py similarity index 94% rename from 
cassandra/cqlengine/tests/query/test_batch_query.py rename to tests/integration/cqlengine/query/test_batch_query.py index 3875375fdf..93b37fb000 100644 --- a/cassandra/cqlengine/tests/query/test_batch_query.py +++ b/tests/integration/cqlengine/query/test_batch_query.py @@ -1,15 +1,13 @@ -from datetime import datetime -from unittest import skip -from uuid import uuid4 -import random -from cqlengine import Model, columns -from cqlengine.connection import NOT_SET -from cqlengine.management import drop_table, sync_table -from cqlengine.query import BatchQuery, DMLQuery -from cqlengine.tests.base import BaseCassEngTestCase -from cassandra.cluster import Session import mock +from cassandra.cqlengine import columns +from cassandra.cqlengine.connection import NOT_SET +from cassandra.cqlengine.management import drop_table, sync_table +from cassandra.cqlengine.models import Model +from cassandra.cqlengine.query import BatchQuery, DMLQuery +from tests.integration.cqlengine.base import BaseCassEngTestCase +from cassandra.cluster import Session + class TestMultiKeyModel(Model): diff --git a/cassandra/cqlengine/tests/query/test_datetime_queries.py b/tests/integration/cqlengine/query/test_datetime_queries.py similarity index 81% rename from cassandra/cqlengine/tests/query/test_datetime_queries.py rename to tests/integration/cqlengine/query/test_datetime_queries.py index 704e55e388..8e0ccbdfd0 100644 --- a/cassandra/cqlengine/tests/query/test_datetime_queries.py +++ b/tests/integration/cqlengine/query/test_datetime_queries.py @@ -1,14 +1,14 @@ from datetime import datetime, timedelta from uuid import uuid4 -from cqlengine.tests.base import BaseCassEngTestCase - -from cqlengine.exceptions import ModelException -from cqlengine.management import sync_table -from cqlengine.management import drop_table -from cqlengine.models import Model -from cqlengine import columns -from cqlengine import query +from tests.integration.cqlengine.base import BaseCassEngTestCase + +from 
cassandra.cqlengine.exceptions import ModelException +from cassandra.cqlengine.management import sync_table +from cassandra.cqlengine.management import drop_table +from cassandra.cqlengine.models import Model +from cassandra.cqlengine import columns +from cassandra.cqlengine import query class DateTimeQueryTestModel(Model): diff --git a/cassandra/cqlengine/tests/query/test_named.py b/tests/integration/cqlengine/query/test_named.py similarity index 95% rename from cassandra/cqlengine/tests/query/test_named.py rename to tests/integration/cqlengine/query/test_named.py index 7e0ccc3d7b..a5f31c554f 100644 --- a/cassandra/cqlengine/tests/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -1,9 +1,10 @@ -from cqlengine import operators -from cqlengine.named import NamedKeyspace -from cqlengine.operators import EqualsOperator, GreaterThanOrEqualOperator -from cqlengine.query import ResultObject -from cqlengine.tests.query.test_queryset import BaseQuerySetUsage -from cqlengine.tests.base import BaseCassEngTestCase +from cassandra.cqlengine import operators +from cassandra.cqlengine.named import NamedKeyspace +from cassandra.cqlengine.operators import EqualsOperator, GreaterThanOrEqualOperator +from cassandra.cqlengine.query import ResultObject + +from tests.integration.cqlengine.base import BaseCassEngTestCase +from tests.integration.cqlengine.query.test_queryset import BaseQuerySetUsage class TestQuerySetOperation(BaseCassEngTestCase): @@ -101,7 +102,7 @@ class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage): def setUpClass(cls): super(TestQuerySetCountSelectionAndIteration, cls).setUpClass() - from cqlengine.tests.query.test_queryset import TestModel + from cassandra.cqlengine.tests.query.test_queryset import TestModel ks,tn = TestModel.column_family_name().split('.') cls.keyspace = NamedKeyspace(ks) diff --git a/cassandra/cqlengine/tests/query/test_queryoperators.py b/tests/integration/cqlengine/query/test_queryoperators.py similarity 
index 86% rename from cassandra/cqlengine/tests/query/test_queryoperators.py rename to tests/integration/cqlengine/query/test_queryoperators.py index 3facdd6828..7e252209b4 100644 --- a/cassandra/cqlengine/tests/query/test_queryoperators.py +++ b/tests/integration/cqlengine/query/test_queryoperators.py @@ -1,13 +1,14 @@ from datetime import datetime -from cqlengine.columns import DateTime -from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine import columns, Model -from cqlengine import functions -from cqlengine import query -from cqlengine.statements import WhereClause -from cqlengine.operators import EqualsOperator -from cqlengine.management import sync_table, drop_table +from cassandra.cqlengine import columns +from cassandra.cqlengine import functions +from cassandra.cqlengine import query +from cassandra.cqlengine.management import sync_table, drop_table +from cassandra.cqlengine.models import Model +from cassandra.cqlengine.operators import EqualsOperator +from cassandra.cqlengine.statements import WhereClause + +from tests.integration.cqlengine.base import BaseCassEngTestCase class TestQuerySetOperation(BaseCassEngTestCase): @@ -22,7 +23,7 @@ def test_maxtimeuuid_function(self): self.assertEqual(str(where), '"time" = MaxTimeUUID(%(5)s)') ctx = {} where.update_context(ctx) - self.assertEqual(ctx, {'5': DateTime().to_database(now)}) + self.assertEqual(ctx, {'5': columns.DateTime().to_database(now)}) def test_mintimeuuid_function(self): """ @@ -35,7 +36,7 @@ def test_mintimeuuid_function(self): self.assertEqual(str(where), '"time" = MinTimeUUID(%(5)s)') ctx = {} where.update_context(ctx) - self.assertEqual(ctx, {'5': DateTime().to_database(now)}) + self.assertEqual(ctx, {'5': columns.DateTime().to_database(now)}) class TokenTestModel(Model): diff --git a/cassandra/cqlengine/tests/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py similarity index 98% rename from cassandra/cqlengine/tests/query/test_queryset.py rename to 
tests/integration/cqlengine/query/test_queryset.py index 441d6e213c..240b414ac4 100644 --- a/cassandra/cqlengine/tests/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -6,22 +6,22 @@ import uuid from cassandra.cluster import Session -from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.connection import NOT_SET +from tests.integration.cqlengine.base import BaseCassEngTestCase +from cassandra.cqlengine.connection import NOT_SET import mock -from cqlengine import functions -from cqlengine.management import sync_table, drop_table -from cqlengine.models import Model -from cqlengine import columns -from cqlengine import query +from cassandra.cqlengine import functions +from cassandra.cqlengine.management import sync_table, drop_table +from cassandra.cqlengine.models import Model +from cassandra.cqlengine import columns +from cassandra.cqlengine import query from datetime import timedelta from datetime import tzinfo -from cqlengine import statements -from cqlengine import operators +from cassandra.cqlengine import statements +from cassandra.cqlengine import operators -from cqlengine.connection import get_session -from cqlengine.tests.base import PROTOCOL_VERSION +from cassandra.cqlengine.connection import get_session +from tests.integration import PROTOCOL_VERSION class TzOffset(tzinfo): diff --git a/cassandra/cqlengine/tests/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py similarity index 96% rename from cassandra/cqlengine/tests/query/test_updates.py rename to tests/integration/cqlengine/query/test_updates.py index 97fb7e327d..714d6419be 100644 --- a/cassandra/cqlengine/tests/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -1,11 +1,11 @@ from uuid import uuid4 -from cqlengine.exceptions import ValidationError -from cqlengine.query import QueryException +from cassandra.cqlengine.exceptions import ValidationError +from cassandra.cqlengine.query import 
QueryException -from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.models import Model -from cqlengine.management import sync_table, drop_table -from cqlengine import columns +from tests.integration.cqlengine.base import BaseCassEngTestCase +from cassandra.cqlengine.models import Model +from cassandra.cqlengine.management import sync_table, drop_table +from cassandra.cqlengine import columns class TestQueryUpdateModel(Model): diff --git a/cassandra/cqlengine/tests/statements/__init__.py b/tests/integration/cqlengine/statements/__init__.py similarity index 100% rename from cassandra/cqlengine/tests/statements/__init__.py rename to tests/integration/cqlengine/statements/__init__.py diff --git a/cassandra/cqlengine/tests/statements/test_assignment_clauses.py b/tests/integration/cqlengine/statements/test_assignment_clauses.py similarity index 98% rename from cassandra/cqlengine/tests/statements/test_assignment_clauses.py rename to tests/integration/cqlengine/statements/test_assignment_clauses.py index 982688a055..aea3b8132e 100644 --- a/cassandra/cqlengine/tests/statements/test_assignment_clauses.py +++ b/tests/integration/cqlengine/statements/test_assignment_clauses.py @@ -1,5 +1,5 @@ from unittest import TestCase -from cqlengine.statements import AssignmentClause, SetUpdateClause, ListUpdateClause, MapUpdateClause, MapDeleteClause, FieldDeleteClause, CounterUpdateClause +from cassandra.cqlengine.statements import AssignmentClause, SetUpdateClause, ListUpdateClause, MapUpdateClause, MapDeleteClause, FieldDeleteClause, CounterUpdateClause class AssignmentClauseTests(TestCase): diff --git a/cassandra/cqlengine/tests/statements/test_assignment_statement.py b/tests/integration/cqlengine/statements/test_assignment_statement.py similarity index 69% rename from cassandra/cqlengine/tests/statements/test_assignment_statement.py rename to tests/integration/cqlengine/statements/test_assignment_statement.py index 695c7cebc8..c33e2177c8 100644 --- 
a/cassandra/cqlengine/tests/statements/test_assignment_statement.py +++ b/tests/integration/cqlengine/statements/test_assignment_statement.py @@ -1,5 +1,5 @@ from unittest import TestCase -from cqlengine.statements import AssignmentStatement, StatementException +from cassandra.cqlengine.statements import AssignmentStatement, StatementException class AssignmentStatementTest(TestCase): @@ -8,4 +8,4 @@ def test_add_assignment_type_checking(self): """ tests that only assignment clauses can be added to queries """ stmt = AssignmentStatement('table', []) with self.assertRaises(StatementException): - stmt.add_assignment_clause('x=5') \ No newline at end of file + stmt.add_assignment_clause('x=5') diff --git a/cassandra/cqlengine/tests/statements/test_base_clause.py b/tests/integration/cqlengine/statements/test_base_clause.py similarity index 84% rename from cassandra/cqlengine/tests/statements/test_base_clause.py rename to tests/integration/cqlengine/statements/test_base_clause.py index c5bbeb402d..969757a01d 100644 --- a/cassandra/cqlengine/tests/statements/test_base_clause.py +++ b/tests/integration/cqlengine/statements/test_base_clause.py @@ -1,5 +1,5 @@ from unittest import TestCase -from cqlengine.statements import BaseClause +from cassandra.cqlengine.statements import BaseClause class BaseClauseTests(TestCase): diff --git a/cassandra/cqlengine/tests/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py similarity index 80% rename from cassandra/cqlengine/tests/statements/test_base_statement.py rename to tests/integration/cqlengine/statements/test_base_statement.py index 4acc1b926f..f0b1793a1a 100644 --- a/cassandra/cqlengine/tests/statements/test_base_statement.py +++ b/tests/integration/cqlengine/statements/test_base_statement.py @@ -1,5 +1,5 @@ from unittest import TestCase -from cqlengine.statements import BaseCQLStatement, StatementException +from cassandra.cqlengine.statements import BaseCQLStatement, 
StatementException class BaseStatementTest(TestCase): diff --git a/cassandra/cqlengine/tests/statements/test_delete_statement.py b/tests/integration/cqlengine/statements/test_delete_statement.py similarity index 94% rename from cassandra/cqlengine/tests/statements/test_delete_statement.py rename to tests/integration/cqlengine/statements/test_delete_statement.py index e658784f71..79b8a78f1c 100644 --- a/cassandra/cqlengine/tests/statements/test_delete_statement.py +++ b/tests/integration/cqlengine/statements/test_delete_statement.py @@ -1,6 +1,6 @@ from unittest import TestCase -from cqlengine.statements import DeleteStatement, WhereClause, MapDeleteClause -from cqlengine.operators import * +from cassandra.cqlengine.statements import DeleteStatement, WhereClause, MapDeleteClause +from cassandra.cqlengine.operators import * import six class DeleteStatementTests(TestCase): diff --git a/cassandra/cqlengine/tests/statements/test_insert_statement.py b/tests/integration/cqlengine/statements/test_insert_statement.py similarity index 93% rename from cassandra/cqlengine/tests/statements/test_insert_statement.py rename to tests/integration/cqlengine/statements/test_insert_statement.py index 7a165cbfe8..a29bc9ba3a 100644 --- a/cassandra/cqlengine/tests/statements/test_insert_statement.py +++ b/tests/integration/cqlengine/statements/test_insert_statement.py @@ -1,5 +1,5 @@ from unittest import TestCase -from cqlengine.statements import InsertStatement, StatementException, AssignmentClause +from cassandra.cqlengine.statements import InsertStatement, StatementException, AssignmentClause import six diff --git a/cassandra/cqlengine/tests/statements/test_quoter.py b/tests/integration/cqlengine/statements/test_quoter.py similarity index 100% rename from cassandra/cqlengine/tests/statements/test_quoter.py rename to tests/integration/cqlengine/statements/test_quoter.py diff --git a/cassandra/cqlengine/tests/statements/test_select_statement.py 
b/tests/integration/cqlengine/statements/test_select_statement.py similarity index 96% rename from cassandra/cqlengine/tests/statements/test_select_statement.py rename to tests/integration/cqlengine/statements/test_select_statement.py index 62d6ce69d9..2f8165199f 100644 --- a/cassandra/cqlengine/tests/statements/test_select_statement.py +++ b/tests/integration/cqlengine/statements/test_select_statement.py @@ -1,6 +1,6 @@ from unittest import TestCase -from cqlengine.statements import SelectStatement, WhereClause -from cqlengine.operators import * +from cassandra.cqlengine.statements import SelectStatement, WhereClause +from cassandra.cqlengine.operators import * import six class SelectStatementTests(TestCase): diff --git a/cassandra/cqlengine/tests/statements/test_update_statement.py b/tests/integration/cqlengine/statements/test_update_statement.py similarity index 95% rename from cassandra/cqlengine/tests/statements/test_update_statement.py rename to tests/integration/cqlengine/statements/test_update_statement.py index ab5aeec014..72b705d726 100644 --- a/cassandra/cqlengine/tests/statements/test_update_statement.py +++ b/tests/integration/cqlengine/statements/test_update_statement.py @@ -1,7 +1,7 @@ from unittest import TestCase -from cqlengine.columns import Set, List -from cqlengine.operators import * -from cqlengine.statements import (UpdateStatement, WhereClause, +from cassandra.cqlengine.columns import Set, List +from cassandra.cqlengine.operators import * +from cassandra.cqlengine.statements import (UpdateStatement, WhereClause, AssignmentClause, SetUpdateClause, ListUpdateClause) import six diff --git a/cassandra/cqlengine/tests/statements/test_where_clause.py b/tests/integration/cqlengine/statements/test_where_clause.py similarity index 86% rename from cassandra/cqlengine/tests/statements/test_where_clause.py rename to tests/integration/cqlengine/statements/test_where_clause.py index cd3bcd6bd5..8ad71083fe 100644 --- 
a/cassandra/cqlengine/tests/statements/test_where_clause.py +++ b/tests/integration/cqlengine/statements/test_where_clause.py @@ -1,7 +1,7 @@ from unittest import TestCase import six -from cqlengine.operators import EqualsOperator -from cqlengine.statements import StatementException, WhereClause +from cassandra.cqlengine.operators import EqualsOperator +from cassandra.cqlengine.statements import StatementException, WhereClause class TestWhereClause(TestCase): diff --git a/cassandra/cqlengine/tests/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py similarity index 95% rename from cassandra/cqlengine/tests/test_batch_query.py rename to tests/integration/cqlengine/test_batch_query.py index 780dee53e2..cd53b7f89e 100644 --- a/cassandra/cqlengine/tests/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -1,14 +1,10 @@ -from unittest import skip -from uuid import uuid4 -import random - -import mock import sure -from cqlengine import Model, columns -from cqlengine.management import drop_table, sync_table -from cqlengine.query import BatchQuery -from cqlengine.tests.base import BaseCassEngTestCase +from cassandra.cqlengine import columns +from cassandra.cqlengine.management import drop_table, sync_table +from cassandra.cqlengine.models import Model +from cassandra.cqlengine.query import BatchQuery +from tests.integration.cqlengine.base import BaseCassEngTestCase class TestMultiKeyModel(Model): diff --git a/cassandra/cqlengine/tests/test_consistency.py b/tests/integration/cqlengine/test_consistency.py similarity index 67% rename from cassandra/cqlengine/tests/test_consistency.py rename to tests/integration/cqlengine/test_consistency.py index 7438fccf1b..478e3c65d1 100644 --- a/cassandra/cqlengine/tests/test_consistency.py +++ b/tests/integration/cqlengine/test_consistency.py @@ -1,10 +1,13 @@ -from cqlengine.management import sync_table, drop_table -from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.models 
import Model -from uuid import uuid4 -from cqlengine import columns import mock -from cqlengine import ALL, BatchQuery +from uuid import uuid4 + +from cassandra import ConsistencyLevel as CL +from cassandra.cqlengine import columns +from cassandra.cqlengine.management import sync_table, drop_table +from cassandra.cqlengine.models import Model +from cassandra.cqlengine.query import BatchQuery + +from tests.integration.cqlengine.base import BaseCassEngTestCase class TestConsistencyModel(Model): @@ -28,15 +31,15 @@ def tearDownClass(cls): class TestConsistency(BaseConsistencyTest): def test_create_uses_consistency(self): - qs = TestConsistencyModel.consistency(ALL) + qs = TestConsistencyModel.consistency(CL.ALL) with mock.patch.object(self.session, 'execute') as m: qs.create(text="i am not fault tolerant this way") args = m.call_args - self.assertEqual(ALL, args[0][0].consistency_level) + self.assertEqual(CL.ALL, args[0][0].consistency_level) def test_queryset_is_returned_on_create(self): - qs = TestConsistencyModel.consistency(ALL) + qs = TestConsistencyModel.consistency(CL.ALL) self.assertTrue(isinstance(qs, TestConsistencyModel.__queryset__), type(qs)) def test_update_uses_consistency(self): @@ -44,10 +47,10 @@ def test_update_uses_consistency(self): t.text = "ham sandwich" with mock.patch.object(self.session, 'execute') as m: - t.consistency(ALL).save() + t.consistency(CL.ALL).save() args = m.call_args - self.assertEqual(ALL, args[0][0].consistency_level) + self.assertEqual(CL.ALL, args[0][0].consistency_level) def test_batch_consistency(self): @@ -58,14 +61,14 @@ def test_batch_consistency(self): args = m.call_args - self.assertEqual(ALL, args[0][0].consistency_level) + self.assertEqual(CL.ALL, args[0][0].consistency_level) with mock.patch.object(self.session, 'execute') as m: with BatchQuery() as b: TestConsistencyModel.batch(b).create(text="monkey") args = m.call_args - self.assertNotEqual(ALL, args[0][0].consistency_level) + self.assertNotEqual(CL.ALL, 
args[0][0].consistency_level) def test_blind_update(self): t = TestConsistencyModel.create(text="bacon and eggs") @@ -73,10 +76,10 @@ def test_blind_update(self): uid = t.id with mock.patch.object(self.session, 'execute') as m: - TestConsistencyModel.objects(id=uid).consistency(ALL).update(text="grilled cheese") + TestConsistencyModel.objects(id=uid).consistency(CL.ALL).update(text="grilled cheese") args = m.call_args - self.assertEqual(ALL, args[0][0].consistency_level) + self.assertEqual(CL.ALL, args[0][0].consistency_level) def test_delete(self): @@ -86,10 +89,10 @@ def test_delete(self): uid = t.id with mock.patch.object(self.session, 'execute') as m: - t.consistency(ALL).delete() + t.consistency(CL.ALL).delete() with mock.patch.object(self.session, 'execute') as m: - TestConsistencyModel.objects(id=uid).consistency(ALL).delete() + TestConsistencyModel.objects(id=uid).consistency(CL.ALL).delete() args = m.call_args - self.assertEqual(ALL, args[0][0].consistency_level) + self.assertEqual(CL.ALL, args[0][0].consistency_level) diff --git a/cassandra/cqlengine/tests/test_ifnotexists.py b/tests/integration/cqlengine/test_ifnotexists.py similarity index 94% rename from cassandra/cqlengine/tests/test_ifnotexists.py rename to tests/integration/cqlengine/test_ifnotexists.py index dc175f87e2..c6402561d7 100644 --- a/cassandra/cqlengine/tests/test_ifnotexists.py +++ b/tests/integration/cqlengine/test_ifnotexists.py @@ -1,13 +1,15 @@ +import mock from unittest import skipUnless -from cqlengine.management import sync_table, drop_table, create_keyspace, delete_keyspace -from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.tests.base import PROTOCOL_VERSION -from cqlengine.models import Model -from cqlengine.exceptions import LWTException, IfNotExistsWithCounterColumn -from cqlengine import columns, BatchQuery from uuid import uuid4 -import mock +from cassandra.cqlengine import columns +from cassandra.cqlengine.exceptions import LWTException, 
IfNotExistsWithCounterColumn +from cassandra.cqlengine.management import sync_table, drop_table +from cassandra.cqlengine.models import Model +from cassandra.cqlengine.query import BatchQuery + +from tests.integration.cqlengine.base import BaseCassEngTestCase +from tests.integration import PROTOCOL_VERSION class TestIfNotExistsModel(Model): diff --git a/cassandra/cqlengine/tests/test_load.py b/tests/integration/cqlengine/test_load.py similarity index 70% rename from cassandra/cqlengine/tests/test_load.py rename to tests/integration/cqlengine/test_load.py index 3e784ac8bd..15c93d7a2f 100644 --- a/cassandra/cqlengine/tests/test_load.py +++ b/tests/integration/cqlengine/test_load.py @@ -1,16 +1,15 @@ +import gc import os -from unittest import TestCase, skipUnless - -from cqlengine import Model, Integer -from cqlengine.management import sync_table -from cqlengine.tests import base import resource -import gc +from unittest import skipUnless -class LoadTest(Model): +from cassandra.cqlengine import columns +from cassandra.cqlengine.models import Model +from cassandra.cqlengine.management import sync_table - k = Integer(primary_key=True) - v = Integer() +class LoadTest(Model): + k = columns.Integer(primary_key=True) + v = columns.Integer() @skipUnless("LOADTEST" in os.environ, "LOADTEST not on") diff --git a/cassandra/cqlengine/tests/test_timestamp.py b/tests/integration/cqlengine/test_timestamp.py similarity index 95% rename from cassandra/cqlengine/tests/test_timestamp.py rename to tests/integration/cqlengine/test_timestamp.py index b1a6a2860d..eb7e6145af 100644 --- a/cassandra/cqlengine/tests/test_timestamp.py +++ b/tests/integration/cqlengine/test_timestamp.py @@ -1,14 +1,13 @@ -""" -Tests surrounding the blah.timestamp( timedelta(seconds=30) ) format. 
-""" from datetime import timedelta, datetime - -from uuid import uuid4 import mock import sure -from cqlengine import Model, columns, BatchQuery -from cqlengine.management import sync_table -from cqlengine.tests.base import BaseCassEngTestCase +from uuid import uuid4 + +from cassandra.cqlengine import columns +from cassandra.cqlengine.management import sync_table +from cassandra.cqlengine.models import Model +from cassandra.cqlengine.query import BatchQuery +from tests.integration.cqlengine.base import BaseCassEngTestCase class TestTimestampModel(Model): diff --git a/cassandra/cqlengine/tests/test_transaction.py b/tests/integration/cqlengine/test_transaction.py similarity index 85% rename from cassandra/cqlengine/tests/test_transaction.py rename to tests/integration/cqlengine/test_transaction.py index eacb366016..3b4fe68618 100644 --- a/cassandra/cqlengine/tests/test_transaction.py +++ b/tests/integration/cqlengine/test_transaction.py @@ -1,18 +1,17 @@ -__author__ = 'Tim Martin' -from unittest import skipUnless - -from cqlengine.management import sync_table, drop_table -from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.tests.base import CASSANDRA_VERSION -from cqlengine.models import Model -from cqlengine.exceptions import LWTException -from uuid import uuid4 -from cqlengine import columns, BatchQuery import mock -from cqlengine import ALL, BatchQuery -from cqlengine.statements import TransactionClause import six +from unittest import skipUnless +from uuid import uuid4 + +from cassandra.cqlengine import columns +from cassandra.cqlengine.management import sync_table, drop_table +from cassandra.cqlengine.models import Model +from cassandra.cqlengine.exceptions import LWTException +from cassandra.cqlengine.query import BatchQuery +from cassandra.cqlengine.statements import TransactionClause +from tests.integration.cqlengine.base import BaseCassEngTestCase +from tests.integration import CASSANDRA_VERSION class TestTransactionModel(Model): id = 
columns.UUID(primary_key=True, default=lambda:uuid4()) @@ -20,7 +19,7 @@ class TestTransactionModel(Model): text = columns.Text(required=False) -@skipUnless(CASSANDRA_VERSION >= 20, "transactions only supported on cassandra 2.0 or higher") +@skipUnless(CASSANDRA_VERSION >= '2.0.0', "transactions only supported on cassandra 2.0 or higher") class TestTransaction(BaseCassEngTestCase): @classmethod diff --git a/cassandra/cqlengine/tests/test_ttl.py b/tests/integration/cqlengine/test_ttl.py similarity index 94% rename from cassandra/cqlengine/tests/test_ttl.py rename to tests/integration/cqlengine/test_ttl.py index fc3cf57f20..96c7d48e9f 100644 --- a/cassandra/cqlengine/tests/test_ttl.py +++ b/tests/integration/cqlengine/test_ttl.py @@ -1,10 +1,10 @@ -from cqlengine.management import sync_table, drop_table -from cqlengine.tests.base import BaseCassEngTestCase -from cqlengine.models import Model +from cassandra.cqlengine.management import sync_table, drop_table +from tests.integration.cqlengine.base import BaseCassEngTestCase +from cassandra.cqlengine.models import Model from uuid import uuid4 -from cqlengine import columns +from cassandra.cqlengine import columns import mock -from cqlengine.connection import get_session +from cassandra.cqlengine.connection import get_session class TestTTLModel(Model): From 1a9199ea5f276f519bd9a5aeec79ad686154f429 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 4 Feb 2015 13:07:05 -0600 Subject: [PATCH 1200/3726] Make cqlengine tests work in present state, following relocation. 
--- cassandra/cqlengine/query.py | 26 +++++++++---------- .../management/test_compaction_settings.py | 2 +- .../cqlengine/management/test_management.py | 8 +++--- .../integration/cqlengine/query/test_named.py | 2 +- .../integration/cqlengine/test_consistency.py | 2 +- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index ca647f016f..3acb007db6 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -2,7 +2,7 @@ from datetime import datetime, timedelta import time -from cassandra.cqlengine.columns import BaseContainerColumn, Map, Counter, List, Set +from cassandra.cqlengine import columns from cassandra.cqlengine.connection import execute, NOT_SET from cassandra.cqlengine.exceptions import CQLEngineException, ValidationError, LWTException, IfNotExistsWithCounterColumn from cassandra.cqlengine.functions import Token, BaseQueryFunction, QueryValue, UnicodeMixin @@ -816,15 +816,15 @@ def update(self, **values): continue # add the update statements - if isinstance(col, Counter): + if isinstance(col, columns.Counter): # TODO: implement counter updates raise NotImplementedError - elif isinstance(col, (List, Set, Map)): - if isinstance(col, List): + elif isinstance(col, (columns.List, columns.Set, columns.Map)): + if isinstance(col, columns.List): klass = ListUpdateClause - elif isinstance(col, Set): + elif isinstance(col, columns.Set): klass = SetUpdateClause - elif isinstance(col, Map): + elif isinstance(col, columns.Map): klass = MapUpdateClause else: raise RuntimeError @@ -893,7 +893,7 @@ def _delete_null_columns(self): if v.deleted: ds.add_field(col.db_field_name) deleted_fields = True - elif isinstance(col, Map): + elif isinstance(col, columns.Map): uc = MapDeleteClause(col.db_field_name, v.value, v.previous_value) if uc.get_context_size() > 0: ds.add_field(uc) @@ -938,16 +938,16 @@ def update(self): continue # don't update something if it hasn't changed - if not 
val_mgr.changed and not isinstance(col, Counter): + if not val_mgr.changed and not isinstance(col, columns.Counter): continue static_changed_only = static_changed_only and col.static - if isinstance(col, (BaseContainerColumn, Counter)): + if isinstance(col, (columns.BaseContainerColumn, columns.Counter)): # get appropriate clause - if isinstance(col, List): klass = ListUpdateClause - elif isinstance(col, Map): klass = MapUpdateClause - elif isinstance(col, Set): klass = SetUpdateClause - elif isinstance(col, Counter): klass = CounterUpdateClause + if isinstance(col, columns.List): klass = ListUpdateClause + elif isinstance(col, columns.Map): klass = MapUpdateClause + elif isinstance(col, columns.Set): klass = SetUpdateClause + elif isinstance(col, columns.Counter): klass = CounterUpdateClause else: raise RuntimeError # do the stuff diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index 5358a4aa6b..5187837a67 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -90,7 +90,7 @@ class AlterTableTest(BaseCassEngTestCase): def test_alter_is_called_table(self): drop_table(LeveledcompactionTestTable) sync_table(LeveledcompactionTestTable) - with patch('cqlengine.management.update_compaction') as mock: + with patch('cassandra.cqlengine.management.update_compaction') as mock: sync_table(LeveledcompactionTestTable) assert mock.called == 1 diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 3359547a07..317f789b8d 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -156,11 +156,11 @@ def test_set_table_properties(self): # TODO: due to a bug in the native driver i'm not seeing the local read repair chance show 
up # 'local_read_repair_chance': 0.50811, } - if CASSANDRA_VERSION >= '2.0.0': + if CASSANDRA_VERSION <= '2.0.0': expected['caching'] = CACHING_ALL expected['replicate_on_write'] = False - if CASSANDRA_VERSION >= '2.0.0': + if CASSANDRA_VERSION == '2.0.0': expected['populate_io_cache_on_flush'] = True expected['index_interval'] = 98706 @@ -200,11 +200,11 @@ def test_table_property_update(self): expected['memtable_flush_period_in_ms'] = 60210 expected['default_time_to_live'] = 65178 - if CASSANDRA_VERSION >= '2.0.0': + if CASSANDRA_VERSION == '2.0.0': expected['index_interval'] = 94207 # these featuers removed in cassandra 2.1 - if CASSANDRA_VERSION >= '2.0.0': + if CASSANDRA_VERSION <= '2.0.0': expected['caching'] = CACHING_NONE expected['replicate_on_write'] = True expected['populate_io_cache_on_flush'] = False diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index a5f31c554f..90301b96f5 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -102,7 +102,7 @@ class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage): def setUpClass(cls): super(TestQuerySetCountSelectionAndIteration, cls).setUpClass() - from cassandra.cqlengine.tests.query.test_queryset import TestModel + from tests.integration.cqlengine.query.test_queryset import TestModel ks,tn = TestModel.column_family_name().split('.') cls.keyspace = NamedKeyspace(ks) diff --git a/tests/integration/cqlengine/test_consistency.py b/tests/integration/cqlengine/test_consistency.py index 478e3c65d1..1137d82845 100644 --- a/tests/integration/cqlengine/test_consistency.py +++ b/tests/integration/cqlengine/test_consistency.py @@ -56,7 +56,7 @@ def test_update_uses_consistency(self): def test_batch_consistency(self): with mock.patch.object(self.session, 'execute') as m: - with BatchQuery(consistency=ALL) as b: + with BatchQuery(consistency=CL.ALL) as b: 
TestConsistencyModel.batch(b).create(text="monkey") args = m.call_args From f1efeaca9b306860e2eff6c50c103bd9924e7445 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 4 Feb 2015 16:26:16 -0600 Subject: [PATCH 1201/3726] Add a mechanism to run with an existing, external ccm cluster. --- tests/integration/__init__.py | 66 +++++++++++++++++++++++++---------- 1 file changed, 48 insertions(+), 18 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 339254a25d..70615baeb6 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -38,27 +38,11 @@ raise unittest.SkipTest('ccm is a dependency for integration tests:', e) CLUSTER_NAME = 'test_cluster' +SINGLE_NODE_CLUSTER_NAME = 'single_node' MULTIDC_CLUSTER_NAME = 'multidc_test_cluster' CCM_CLUSTER = None -CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) -CASSANDRA_VERSION = os.getenv('CASSANDRA_VERSION', '2.0.9') - -CCM_KWARGS = {} -if CASSANDRA_DIR: - log.info("Using Cassandra dir: %s", CASSANDRA_DIR) - CCM_KWARGS['install_dir'] = CASSANDRA_DIR -else: - log.info('Using Cassandra version: %s', CASSANDRA_VERSION) - CCM_KWARGS['version'] = CASSANDRA_VERSION - -if CASSANDRA_VERSION.startswith('1'): - default_protocol_version = 1 -else: - default_protocol_version = 2 -PROTOCOL_VERSION = int(os.getenv('PROTOCOL_VERSION', default_protocol_version)) - path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'ccm') if not os.path.exists(path): os.mkdir(path) @@ -77,7 +61,7 @@ def get_server_versions(): if cass_version is not None: return (cass_version, cql_version) - c = Cluster(protocol_version=PROTOCOL_VERSION) + c = Cluster() s = c.connect() s.set_keyspace('system') row = s.execute('SELECT cql_version, release_version FROM local')[0] @@ -97,6 +81,40 @@ def _tuple_version(version_string): return tuple([int(p) for p in version_string.split('.')]) +USE_CCM_CASS_EXTERNAL = bool(os.getenv('USE_CCM_CASS_EXTERNAL', False)) + +default_cassandra_version = 
'2.1.2' + +if USE_CCM_CASS_EXTERNAL: + path = common.get_default_path() + name = common.current_cluster_name(path) + CCM_CLUSTER = CCMClusterFactory.load(common.get_default_path(), name) + CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True) + + cass_ver, _ = get_server_versions() + default_cassandra_version = '.'.join('%d' % i for i in cass_ver) + +CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) +CASSANDRA_VERSION = os.getenv('CASSANDRA_VERSION', default_cassandra_version) + +CCM_KWARGS = {} +if CASSANDRA_DIR: + log.info("Using Cassandra dir: %s", CASSANDRA_DIR) + CCM_KWARGS['install_dir'] = CASSANDRA_DIR +else: + log.info('Using Cassandra version: %s', CASSANDRA_VERSION) + CCM_KWARGS['version'] = CASSANDRA_VERSION + +if CASSANDRA_VERSION > '2.1': + default_protocol_version = 3 +elif CASSANDRA_VERSION > '2.0': + default_protocol_version = 2 +else: + default_protocol_version = 1 + +PROTOCOL_VERSION = int(os.getenv('PROTOCOL_VERSION', default_protocol_version)) + + def get_cluster(): return CCM_CLUSTER @@ -113,7 +131,14 @@ def use_singledc(start=True): use_cluster(CLUSTER_NAME, [3], start=start) +def use_single_node(start=True): + use_cluster(SINGLE_NODE_CLUSTER_NAME, [1], start=start) + + def remove_cluster(): + if USE_CCM_CASS_EXTERNAL: + return + global CCM_CLUSTER if CCM_CLUSTER: log.debug("removing cluster %s", CCM_CLUSTER.name) @@ -131,6 +156,9 @@ def is_current_cluster(cluster_name, node_counts): def use_cluster(cluster_name, nodes, ipformat=None, start=True): + if USE_CCM_CASS_EXTERNAL: + return + if is_current_cluster(cluster_name, nodes): log.debug("Using existing cluster %s", cluster_name) return @@ -165,6 +193,8 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True): def teardown_package(): + if USE_CCM_CASS_EXTERNAL: + return # when multiple modules are run explicitly, this runs between them # need to make sure CCM_CLUSTER is properly cleared for that case remove_cluster() From 426664d3e1322b8eaf11ccae22cc4328ef780b2f Mon 
Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 4 Feb 2015 16:27:23 -0600 Subject: [PATCH 1202/3726] typo --- tests/integration/cqlengine/management/test_management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 317f789b8d..4d8df35e8b 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -52,7 +52,7 @@ class PrimaryKeysOnlyModel(Model): class CapitalizedKeyTest(BaseCassEngTestCase): def test_table_definition(self): - """ Tests that creating a table with capitalized column names succeedso """ + """ Tests that creating a table with capitalized column names succeeds """ sync_table(LowercaseKeyModel) sync_table(CapitalizedKeyModel) From 3171119cc12717e0a856531949fda514d38d2ea4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 4 Feb 2015 16:49:40 -0600 Subject: [PATCH 1203/3726] Make cqlengine tests invoke ccm. 
--- tests/integration/__init__.py | 3 ++- tests/integration/cqlengine/__init__.py | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 70615baeb6..0658a29dbb 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -156,14 +156,15 @@ def is_current_cluster(cluster_name, node_counts): def use_cluster(cluster_name, nodes, ipformat=None, start=True): + global CCM_CLUSTER if USE_CCM_CASS_EXTERNAL: + log.debug("Using external cluster %s", CCM_CLUSTER.name) return if is_current_cluster(cluster_name, nodes): log.debug("Using existing cluster %s", cluster_name) return - global CCM_CLUSTER if CCM_CLUSTER: log.debug("Stopping cluster %s", CCM_CLUSTER.name) CCM_CLUSTER.stop() diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index 8fa5480e22..22069e43e6 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -1,10 +1,12 @@ from cassandra.cqlengine import connection from cassandra.cqlengine.management import create_keyspace -from tests.integration import PROTOCOL_VERSION +from tests.integration import use_single_node, PROTOCOL_VERSION def setup_package(): + use_single_node() + keyspace = 'cqlengine_test' connection.setup(['localhost'], protocol_version=PROTOCOL_VERSION, From 53777da01f1858a5775df1395d5fa5351979c598 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 5 Feb 2015 11:41:41 -0600 Subject: [PATCH 1204/3726] Remove cqlengine project artifacts, update existing README --- AUTHORS | 17 -- CONTRIBUTING.md | 12 - Makefile | 14 -- README.rst | 13 +- RELEASE.txt | 7 - Vagrantfile | 46 ---- bin/get_changelog.py | 18 -- bin/test.py | 6 - changelog | 279 ---------------------- manifests/default.pp | 66 ----- modules/cassandra/files/cassandra.upstart | 18 -- requirements-dev.txt | 7 - test-requirements.txt | 2 + upgrading.txt | 13 - 14 files changed, 9 insertions(+), 509 
deletions(-) delete mode 100644 AUTHORS delete mode 100644 CONTRIBUTING.md delete mode 100644 Makefile delete mode 100644 RELEASE.txt delete mode 100644 Vagrantfile delete mode 100644 bin/get_changelog.py delete mode 100755 bin/test.py delete mode 100644 changelog delete mode 100644 manifests/default.pp delete mode 100644 modules/cassandra/files/cassandra.upstart delete mode 100644 requirements-dev.txt delete mode 100644 upgrading.txt diff --git a/AUTHORS b/AUTHORS deleted file mode 100644 index 8e03f7f80b..0000000000 --- a/AUTHORS +++ /dev/null @@ -1,17 +0,0 @@ -PRIMARY AUTHORS - -Blake Eggleston -Jon Haddad - -CONTRIBUTORS (let us know if we missed you) - -Eric Scrivner - test environment, connection pooling -Kevin Deldycke -Roey Berman -Danny Cosson -Michael Hall -Netanel Cohen-Tzemach -Mariusz Kryński -Greg Doermann -@pandu-rao -Amy Hanlon diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 62f6d786bb..0000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,12 +0,0 @@ -### Contributing code to cqlengine - -Before submitting a pull request, please make sure that it follows these guidelines: - -* Limit yourself to one feature or bug fix per pull request. -* Include unittests that thoroughly test the feature/bug fix -* Write clear, descriptive commit messages. -* Many granular commits are preferred over large monolithic commits -* If you're adding or modifying features, please update the documentation - -If you're working on a big ticket item, please check in on [cqlengine-users](https://groups.google.com/forum/?fromgroups#!forum/cqlengine-users). -We'd hate to have to steer you in a different direction after you've already put in a lot of hard work. diff --git a/Makefile b/Makefile deleted file mode 100644 index 061fc27426..0000000000 --- a/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -clean: - find . 
-name *.pyc -delete - rm -rf cqlengine/__pycache__ - - -build: clean - python setup.py build - -release: clean - python setup.py sdist upload - - -.PHONY: build - diff --git a/README.rst b/README.rst index d4995afaad..7496774061 100644 --- a/README.rst +++ b/README.rst @@ -8,7 +8,9 @@ A Python client driver for Apache Cassandra. This driver works exclusively with the Cassandra Query Language v3 (CQL3) and Cassandra's native protocol. Cassandra versions 1.2 through 2.1 are supported. -The driver supports Python 2.6, 2.7, 3.3, and 3.4. +The driver supports Python 2.6, 2.7, 3.3, and 3.4*. + +* cqlengine component presently supports Python 2.7+ Installation ------------ @@ -32,8 +34,9 @@ A couple of links for getting up to speed: Object Mapper ------------- -The recommended object mapper for CQL is `cqlengine `_, -which utilizes this driver. +cqlengine (originally developed by Blake Eggleston and Jon Haddad, with contributions from the +community) is now maintained as an integral part of this package. Refer to +`documentation here `## UPDATE LINK ##. 
Reporting Problems ------------------ @@ -54,12 +57,10 @@ you can use `freenode's web-based client true - - # Provision with puppet - config.vm.provision :shell, :inline => "apt-get update" - - config.vm.provision :puppet, :options => ['--verbose', '--debug'] do |puppet| - puppet.facter = {'hostname' => 'cassandraengine'} - # puppet.manifests_path = "puppet/manifests" - # puppet.manifest_file = "site.pp" - puppet.module_path = "modules" - end -end diff --git a/bin/get_changelog.py b/bin/get_changelog.py deleted file mode 100644 index 7844f66c0b..0000000000 --- a/bin/get_changelog.py +++ /dev/null @@ -1,18 +0,0 @@ -from github import Github -import sys, os - -g = Github(os.environ["GITHUB_TOKEN"]) - -milestone = sys.argv[1] -print "Fetching all issues for milestone %s" % milestone -repo = g.get_repo("cqlengine/cqlengine") - -milestones = repo.get_milestones() -milestone = [x for x in milestones if x.title == milestone][0] -issues = repo.get_issues(milestone=milestone, state="closed") - -for issue in issues: - print "[%d] %s" % (issue.number, issue.title) - -print("Done") - diff --git a/bin/test.py b/bin/test.py deleted file mode 100755 index d8d5097beb..0000000000 --- a/bin/test.py +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env python - -import nose - - -nose.main() diff --git a/changelog b/changelog deleted file mode 100644 index 2eb0ccd64f..0000000000 --- a/changelog +++ /dev/null @@ -1,279 +0,0 @@ -CHANGELOG - - -0.21.1 - -removed references to create_missing_keyspace - - -0.21.0 - -Create keyspace no longer defaults to SimpleStrategy with 3 replicas. It must be manually specified. -sync_table() no longer has an option to create keyspaces automatically. 
-cqlengine_test is now the default keyspace used for tests and is explicitly created -all references hardcoded to the "test" keyspace have been removed and use the default instead -instead of testing with bin/nose, use setup_package -improved validation error messages to include column name -fixed updating empty sets -improvements to puppet download script -per query timeouts now supported, see https://cqlengine.readthedocs.org/en/latest/topics/queryset.html?highlight=timeout#per-query-timeouts - -0.19.0 - -Fixed tests with Cassandra version 1.2 and 2.1 -Fixed broken static columns -support for IF NOT EXISTS via Model.if_not_exists().create() - -0.18.1 - -[264] fixed support for bytearrays in blob columns - - -0.18.0 - -[260] support for static columns -[259] docs update - examples for the queryset method references -[258] improve docs around blind updates -[256] write docs about developing cqlengine itself -[254] use CASSANDRA_VERSION in tests -[252] memtable_flush_period_in_ms not in C 1.2 - guard against test failure -[251] test fails occasionally despite lists looking the same -[246] document Batches default behaviour -[239] add sphinx contrib module to docs -[237] update to cassandra-driver 2.1 -[232] update docs w/ links to external tutorials -[229] get travis to test different cassandra versions -[211] inet datatype support -[209] Python 3 support - - -0.17.0 - -* retry_connect on setup() -* optional default TTL on model -* fixed caching documentation - - -0.16.0 - - 225: No handling of PagedResult from execute - 222: figure out how to make travis not totally fail when a test is skipped - 220: delayed connect. use setup(delayed_connect=True) - 218: throw exception on create_table and delete_table - 212: Unchanged primary key trigger error on update - 206: FAQ - why we dont' do #189 - 191: Add support for simple table properties. 
- 172: raise exception when None is passed in as query param - 170: trying to perform queries before connection is established should raise useful exception - 162: Not Possible to Make Non-Equality Filtering Queries - 161: Filtering on DateTime column - 154: Blob(bytes) column type issue - 128: remove default read_repair_chance & ensure changes are saved when using sync_table - 106: specify caching on model - 99: specify caching options table management - 94: type checking on sync_table (currently allows anything then fails miserably) - 73: remove default 'cqlengine' keyspace table management - 71: add named table and query expression usage to docs - - - -0.15.0 -* native driver integration - -0.14.0 - -* fix for setting map to empty (Lifto) -* report when creating models with attributes that conflict with cqlengine (maedhroz) -* use stable version of sure package (maedhroz) -* performance improvements - - -0.13.0 -* adding support for post batch callbacks (thanks Daniel Dotsenko github.com/dvdotsenko) -* fixing sync table for tables with multiple keys (thanks Daniel Dotsenko github.com/dvdotsenko) -* fixing bug in Bytes column (thanks Netanel Cohen-Tzemach github.com/natict) -* fixing bug with timestamps and DST (thanks Netanel Cohen-Tzemach github.com/natict) - -0.12.0 - -* Normalize and unquote boolean values. 
(Thanks Kevin Deldycke github.com/kdeldycke) -* Fix race condition in connection manager (Thanks Roey Berman github.com/bergundy) -* allow access to instance columns as if it is a dict (Thanks Kevin Deldycke github.com/kdeldycke) -* Added support for blind partial updates to queryset (Thanks Danny Cosson github.com/dcosson) -* Model instance equality check bugfix (Thanks to Michael Hall, github.com/mahall) -* Fixed bug syncing tables with camel cased names (Thanks to Netanel Cohen-Tzemach, github.com/natict) -* Fixed bug dealing with eggs (Thanks Kevin Deldycke github.com/kdeldycke) - -0.11.0 - -* support for USING TIMESTAMP via a .timestamp(timedelta(seconds=30)) syntax - - allows for long, timedelta, and datetime -* fixed use of USING TIMESTAMP in batches -* clear TTL and timestamp off models after persisting to DB -* allows UUID without dashes - (Thanks to Michael Hall, github.com/mahall) -* fixes regarding syncing schema settings (thanks Kai Lautaportti github.com/dokai) - -0.10.0 - -* support for execute_on_exception within batches - -0.9.2 -* fixing create keyspace with network topology strategy -* fixing regression with query expressions - -0.9 -* adding update method -* adding support for ttls -* adding support for per-query consistency -* adding BigInt column (thanks @Lifto) -* adding support for timezone aware time uuid functions (thanks @dokai) -* only saving collection fields on insert if they've been modified -* adding model method that returns a list of modified columns - -0.8.5 -* adding support for timeouts - -0.8.4 -* changing value manager previous value copying to deepcopy - -0.8.3 -* better logging for operational errors - -0.8.2 -* fix for connection failover - -0.8.1 -* fix for models not exactly matching schema - -0.8.0 -* support for table polymorphism -* var int type - -0.7.1 -* refactoring query class to make defining custom model instantiation logic easier - -0.7.0 -* added counter columns -* added support for compaction settings at the 
model level -* deprecated delete_table in favor of drop_table -* deprecated create_table in favor of sync_table -* added support for custom QuerySets - -0.6.0 -* added table sync - -0.5.2 -* adding hex conversion to Bytes column - -0.5.1 -* improving connection pooling -* fixing bug with clustering order columns not being quoted - -0.5 -* eagerly loading results into the query result cache, the cql driver does this anyway, - and pulling them from the cursor was causing some problems with gevented queries, - this will cause some breaking changes for users calling execute directly - -0.4.10 -* changing query parameter placeholders from uuid1 to uuid4 - -0.4.7 -* adding support for passing None into query batch methods to clear any batch objects - -0.4.6 -* fixing the way models are compared - -0.4.5 -* fixed bug where container columns would not call their child to_python method, this only really affected columns with special to_python logic - -0.4.4 -* adding query logging back -* fixed bug updating an empty list column - -0.4.3 -* fixed bug with Text column validation - -0.4.2 -* added support for instantiating container columns with column instances - -0.4.1 -* fixed bug in TimeUUID from datetime method - -0.4.0 -* removed default values from all column types -* explicit primary key is required (automatic id removed) -* added validation on keyname types on .create() -* changed table_name to __table_name__, read_repair_chance to __read_repair_chance__, keyspace to __keyspace__ -* modified table name auto generator to ignore module name -* changed internal implementation of model value get/set -* added TimeUUID.from_datetime(), used for generating UUID1's for a specific -time - -0.3.3 -* added abstract base class models - -0.3.2 -* comprehesive rewrite of connection management (thanks @rustyrazorblade) - -0.3 -* added support for Token function (thanks @mrk-its) -* added support for compound partition key (thanks @mrk-its)s -* added support for defining clustering 
key ordering (thanks @mrk-its) -* added values_list to Query class, bypassing object creation if desired (thanks @mrk-its) -* fixed bug with Model.objects caching values (thanks @mrk-its) -* fixed Cassandra 1.2.5 compatibility bug -* updated model exception inheritance - -0.2.1 -* adding support for datetimes with tzinfo (thanks @gdoermann) -* fixing bug in saving map updates (thanks @pandu-rao) - -0.2 -* expanding internal save function to use update where appropriate -* adding set, list, and map collection types -* adding support for allow filtering flag -* updating management functions to work with cassandra 1.2 -* fixed a bug querying datetimes -* modifying datetime serialization to preserve millisecond accuracy -* adding cql function call generators MaxTimeUUID and MinTimeUUID - -0.1.1 -* fixed a bug occurring when creating tables from the REPL - -0.1 -* added min_length and max_length validators to the Text column -* added == and != equality operators to model class -* fixed bug with default values that would evaluate to False (ie: 0, '') - -0.0.9 -* fixed column inheritance bug -* manually defined table names are no longer inherited by subclasses - -0.0.8 -* added configurable read repair chance to model definitions -* added configurable keyspace strategy class and replication factor to keyspace creator - -0.0.7 -* fixed manual table name bug -* changed model level db_name field to table_name - -0.0.6 -* added TimeUUID column - -0.0.5 -* added connection pooling -* adding a few convenience query classmethods to the model class - -0.0.4-ALPHA -* added Sphinx docs -* changing create_column_family management function to create_table -* changing delete_column_family management function to delete_table -* added partition key validation to QuerySet delete method -* added .get() method to QuerySet -* added create method shortcut to the model class - -0.0.3-ALPHA -* added queryset result caching -* added queryset array index access and slicing -* updating table name 
generation (more readable) - diff --git a/manifests/default.pp b/manifests/default.pp deleted file mode 100644 index 4b94fc8512..0000000000 --- a/manifests/default.pp +++ /dev/null @@ -1,66 +0,0 @@ -# Basic virtualbox configuration -Exec { path => "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" } - -node basenode { - package{["build-essential", "git-core", "vim"]: - ensure => installed - } -} - -class xfstools { - package{['lvm2', 'xfsprogs']: - ensure => installed - } -} -class java { - package {['openjdk-7-jre-headless']: - ensure => installed - } -} - -class cassandra { - include xfstools - include java - - package {"wget": - ensure => latest - } - - file {"/etc/init/cassandra.conf": - source => "puppet:///modules/cassandra/cassandra.upstart", - owner => root - } - - exec {"download-cassandra": - cwd => "/tmp", - command => "wget http://download.nextag.com/apache/cassandra/1.2.19/apache-cassandra-1.2.19-bin.tar.gz", - creates => "/tmp/apache-cassandra-1.2.19-bin.tar.gz", - require => [Package["wget"], File["/etc/init/cassandra.conf"]] - } - - exec {"install-cassandra": - cwd => "/tmp", - command => "tar -xzf apache-cassandra-1.2.19-bin.tar.gz; mv apache-cassandra-1.2.19 /usr/local/cassandra", - require => Exec["download-cassandra"], - creates => "/usr/local/cassandra/bin/cassandra" - } - - service {"cassandra": - ensure => running, - require => Exec["install-cassandra"] - } -} - -node cassandraengine inherits basenode { - include cassandra - - package {["python-pip", "python-dev", "python-nose"]: - ensure => installed - } - - exec {"install-requirements": - cwd => "/vagrant", - command => "pip install -r requirements-dev.txt", - require => [Package["python-pip"], Package["python-dev"]] - } -} diff --git a/modules/cassandra/files/cassandra.upstart b/modules/cassandra/files/cassandra.upstart deleted file mode 100644 index 05278cad04..0000000000 --- a/modules/cassandra/files/cassandra.upstart +++ /dev/null @@ -1,18 +0,0 @@ -# Cassandra Upstart -# - 
-description "Cassandra" - -start on runlevel [2345] -stop on runlevel [!2345] - -respawn - -exec /usr/local/cassandra/bin/cassandra -f - - - -pre-stop script - kill `cat /tmp/cassandra.pid` - sleep 3 -end script \ No newline at end of file diff --git a/requirements-dev.txt b/requirements-dev.txt deleted file mode 100644 index 748d8bbe54..0000000000 --- a/requirements-dev.txt +++ /dev/null @@ -1,7 +0,0 @@ --r requirements.txt -nose -nose-progressive -profilestats -pycallgraph -ipdbplugin==1.2 -ipdb==0.7 diff --git a/test-requirements.txt b/test-requirements.txt index 430515c0d3..e4d2d3836b 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,3 +1,4 @@ +-r requirements.txt blist scales nose @@ -6,3 +7,4 @@ ccm>=2.0 unittest2 PyYAML pytz +sure diff --git a/upgrading.txt b/upgrading.txt deleted file mode 100644 index 95727aa546..0000000000 --- a/upgrading.txt +++ /dev/null @@ -1,13 +0,0 @@ -0.15 Upgrade to use Datastax Native Driver - -We no longer raise cqlengine based OperationalError when connection fails. Now using the exception thrown in the native driver. - -If you're calling setup() with the old thrift port in place, you will get connection errors. Either remove it (the default native port is assumed) or change to the default native port, 9042. - -The cqlengine connection pool has been removed. Connections are now managed by the native driver. This should drastically reduce the socket overhead as the native driver can multiplex queries. - -If you were previously manually using "ALL", "QUORUM", etc, to specificy consistency levels, you will need to migrate to the cqlengine.ALL, QUORUM, etc instead. If you had been using the module level constants before, nothing should need to change. - -No longer accepting username & password as arguments to setup. Use the native driver's authentication instead. 
See http://datastax.github.io/python-driver/api/cassandra/auth.html - - From c13f047f45756bcf6613e21b3dd6067ce2de7a41 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 5 Feb 2015 12:21:51 -0600 Subject: [PATCH 1205/3726] Rearrange cqlengine docs --- README.md | 96 ------------------- docs/{topics => cqlengine}/columns.rst | 0 docs/{topics => cqlengine}/connection.rst | 0 docs/{topics => cqlengine}/development.rst | 0 .../external_resources.rst | 0 docs/{topics => cqlengine}/faq.rst | 0 docs/{topics => cqlengine}/manage_schemas.rst | 0 docs/{topics => cqlengine}/models.rst | 0 docs/{topics => cqlengine}/queryset.rst | 0 .../related_projects.rst | 0 docs/{topics => cqlengine}/third_party.rst | 0 11 files changed, 96 deletions(-) delete mode 100644 README.md rename docs/{topics => cqlengine}/columns.rst (100%) rename docs/{topics => cqlengine}/connection.rst (100%) rename docs/{topics => cqlengine}/development.rst (100%) rename docs/{topics => cqlengine}/external_resources.rst (100%) rename docs/{topics => cqlengine}/faq.rst (100%) rename docs/{topics => cqlengine}/manage_schemas.rst (100%) rename docs/{topics => cqlengine}/models.rst (100%) rename docs/{topics => cqlengine}/queryset.rst (100%) rename docs/{topics => cqlengine}/related_projects.rst (100%) rename docs/{topics => cqlengine}/third_party.rst (100%) diff --git a/README.md b/README.md deleted file mode 100644 index 594675abb0..0000000000 --- a/README.md +++ /dev/null @@ -1,96 +0,0 @@ -cqlengine -=============== - -![cqlengine build status](https://travis-ci.org/cqlengine/cqlengine.svg?branch=master) -![cqlengine pypi version](http://img.shields.io/pypi/v/cqlengine.svg) -![cqlengine monthly downloads](http://img.shields.io/pypi/dm/cqlengine.svg) - -cqlengine is a Cassandra CQL 3 Object Mapper for Python - -**Users of versions < 0.16, the default keyspace 'cqlengine' has been removed. 
Please read this before upgrading:** [Breaking Changes](https://cqlengine.readthedocs.org/en/latest/topics/models.html#keyspace-change) - -[Documentation](https://cqlengine.readthedocs.org/en/latest/) - -[Report a Bug](https://github.com/cqlengine/cqlengine/issues) - -[Users Mailing List](https://groups.google.com/forum/?fromgroups#!forum/cqlengine-users) - -## Installation -``` -pip install cqlengine -``` - -## Getting Started on your local machine - -```python -#first, define a model -import uuid -from cqlengine import columns -from cqlengine.models import Model - -class ExampleModel(Model): - read_repair_chance = 0.05 # optional - defaults to 0.1 - example_id = columns.UUID(primary_key=True, default=uuid.uuid4) - example_type = columns.Integer(index=True) - created_at = columns.DateTime() - description = columns.Text(required=False) - -#next, setup the connection to your cassandra server(s) and the default keyspace... ->>> from cqlengine import connection ->>> connection.setup(['127.0.0.1'], "cqlengine") - -# or if you're still on cassandra 1.2 ->>> connection.setup(['127.0.0.1'], "cqlengine", protocol_version=1) - -# create your keyspace. 
This is, in general, not what you want in production -# see https://cassandra.apache.org/doc/cql3/CQL.html#createKeyspaceStmt for options ->>> create_keyspace("cqlengine", "SimpleStrategy", 1) - -#...and create your CQL table ->>> from cqlengine.management import sync_table ->>> sync_table(ExampleModel) - -#now we can create some rows: ->>> em1 = ExampleModel.create(example_type=0, description="example1", created_at=datetime.now()) ->>> em2 = ExampleModel.create(example_type=0, description="example2", created_at=datetime.now()) ->>> em3 = ExampleModel.create(example_type=0, description="example3", created_at=datetime.now()) ->>> em4 = ExampleModel.create(example_type=0, description="example4", created_at=datetime.now()) ->>> em5 = ExampleModel.create(example_type=1, description="example5", created_at=datetime.now()) ->>> em6 = ExampleModel.create(example_type=1, description="example6", created_at=datetime.now()) ->>> em7 = ExampleModel.create(example_type=1, description="example7", created_at=datetime.now()) ->>> em8 = ExampleModel.create(example_type=1, description="example8", created_at=datetime.now()) - -#and now we can run some queries against our table ->>> ExampleModel.objects.count() -8 ->>> q = ExampleModel.objects(example_type=1) ->>> q.count() -4 ->>> for instance in q: ->>> print instance.description -example5 -example6 -example7 -example8 - -#here we are applying additional filtering to an existing query -#query objects are immutable, so calling filter returns a new -#query object ->>> q2 = q.filter(example_id=em5.example_id) - ->>> q2.count() -1 ->>> for instance in q2: ->>> print instance.description -example5 -``` - -## Contributing - -If you'd like to contribute to cqlengine, please read the [contributor guidelines](https://github.com/bdeggleston/cqlengine/blob/master/CONTRIBUTING.md) - - -## Authors - -cqlengine was developed primarily by (Blake Eggleston)[blakeeggleston](https://twitter.com/blakeeggleston) and [Jon 
Haddad](https://twitter.com/rustyrazorblade), with contributions from several others in the community. - diff --git a/docs/topics/columns.rst b/docs/cqlengine/columns.rst similarity index 100% rename from docs/topics/columns.rst rename to docs/cqlengine/columns.rst diff --git a/docs/topics/connection.rst b/docs/cqlengine/connection.rst similarity index 100% rename from docs/topics/connection.rst rename to docs/cqlengine/connection.rst diff --git a/docs/topics/development.rst b/docs/cqlengine/development.rst similarity index 100% rename from docs/topics/development.rst rename to docs/cqlengine/development.rst diff --git a/docs/topics/external_resources.rst b/docs/cqlengine/external_resources.rst similarity index 100% rename from docs/topics/external_resources.rst rename to docs/cqlengine/external_resources.rst diff --git a/docs/topics/faq.rst b/docs/cqlengine/faq.rst similarity index 100% rename from docs/topics/faq.rst rename to docs/cqlengine/faq.rst diff --git a/docs/topics/manage_schemas.rst b/docs/cqlengine/manage_schemas.rst similarity index 100% rename from docs/topics/manage_schemas.rst rename to docs/cqlengine/manage_schemas.rst diff --git a/docs/topics/models.rst b/docs/cqlengine/models.rst similarity index 100% rename from docs/topics/models.rst rename to docs/cqlengine/models.rst diff --git a/docs/topics/queryset.rst b/docs/cqlengine/queryset.rst similarity index 100% rename from docs/topics/queryset.rst rename to docs/cqlengine/queryset.rst diff --git a/docs/topics/related_projects.rst b/docs/cqlengine/related_projects.rst similarity index 100% rename from docs/topics/related_projects.rst rename to docs/cqlengine/related_projects.rst diff --git a/docs/topics/third_party.rst b/docs/cqlengine/third_party.rst similarity index 100% rename from docs/topics/third_party.rst rename to docs/cqlengine/third_party.rst From 5a22c2d7e460b61a947092755e87152c4cc801a2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 6 Feb 2015 17:03:53 -0600 Subject: [PATCH 
1206/3726] cqlengine integration: split docs into code, divide api docs Still more to be done in object_mapper.rst cqlengine --- cassandra/cqlengine/columns.py | 123 +++++- cassandra/cqlengine/models.py | 111 ++++-- cassandra/cqlengine/query.py | 182 ++++++++- docs/api/cassandra/cluster.rst | 2 + docs/api/cassandra/cqlengine/columns.rst | 81 ++++ docs/api/cassandra/cqlengine/models.rst | 215 ++++++++++ docs/api/cassandra/cqlengine/query.rst | 42 ++ docs/api/index.rst | 11 + docs/conf.py | 2 +- docs/cqlengine/batches.rst | 108 +++++ docs/cqlengine/columns.rst | 4 - docs/cqlengine/connection.rst | 4 - docs/cqlengine/models.rst | 483 +++++------------------ docs/cqlengine/queryset.rst | 322 +-------------- docs/index.rst | 142 +------ docs/object_mapper.rst | 102 +++++ 16 files changed, 1034 insertions(+), 900 deletions(-) create mode 100644 docs/api/cassandra/cqlengine/columns.rst create mode 100644 docs/api/cassandra/cqlengine/models.rst create mode 100644 docs/api/cassandra/cqlengine/query.rst create mode 100644 docs/cqlengine/batches.rst create mode 100644 docs/object_mapper.rst diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index cc43a11712..1f13e3d724 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -92,6 +92,57 @@ class Column(object): instance_counter = 0 + primary_key = False + """ + bool flag, indicates this column is a primary key. 
The first primary key defined + on a model is the partition key (unless partition keys are set), all others are cluster keys + """ + + partition_key = False + + """ + indicates that this column should be the partition key, defining + more than one partition key column creates a compound partition key + """ + + index = False + """ + bool flag, indicates an index should be created for this column + """ + + db_field = False + """ + the fieldname this field will map to in the database + """ + + default = False + """ + the default value, can be a value or a callable (no args) + """ + + required = False + """ + boolean, is the field required? Model validation will raise and + exception if required is set to True and there is a None value assigned + """ + + clustering_order = False + """ + only applicable on clustering keys (primary keys that are not partition keys) + determines the order that the clustering keys are sorted on disk + """ + + polymorphic_key = False + """ + boolean, if set to True, this column will be used for saving and loading instances + of polymorphic tables + """ + + static = False + """ + boolean, if set to True, this is a static column, with a single value per partition + """ + def __init__(self, primary_key=False, partition_key=False, @@ -102,21 +153,6 @@ def __init__(self, clustering_order=None, polymorphic_key=False, static=False): - """ - :param primary_key: bool flag, indicates this column is a primary key. 
The first primary key defined - on a model is the partition key (unless partition keys are set), all others are cluster keys - :param partition_key: indicates that this column should be the partition key, defining - more than one partition key column creates a compound partition key - :param index: bool flag, indicates an index should be created for this column - :param db_field: the fieldname this field will map to in the database - :param default: the default value, can be a value or a callable (no args) - :param required: boolean, is the field required? Model validation will raise and - exception if required is set to True and there is a None value assigned - :param clustering_order: only applicable on clustering keys (primary keys that are not partition keys) - determines the order that the clustering keys are sorted on disk - :param polymorphic_key: boolean, if set to True, this column will be used for saving and loading instances - of polymorphic tables - """ self.partition_key = partition_key self.primary_key = partition_key or primary_key self.index = index @@ -216,6 +252,9 @@ def _val_is_null(self, val): class Blob(Column): + """ + Stores a raw binary value + """ db_type = 'blob' def to_database(self, value): @@ -233,15 +272,36 @@ def to_python(self, value): Bytes = Blob class Ascii(Column): + """ + Stores a US-ASCII character string + """ db_type = 'ascii' class Inet(Column): + """ + Stores an IP address in IPv4 or IPv6 format + """ db_type = 'inet' class Text(Column): + """ + Stores a UTF-8 encoded string + """ + db_type = 'text' + min_length = None + """ + Sets the minimum length of this string, for validation purposes. + Defaults to 1 if this is a ``required`` column. Otherwise, None. + """ + + max_length = None + """ + Sets the maximum length of this string, for validation purposes. 
+ """ + def __init__(self, *args, **kwargs): self.min_length = kwargs.pop('min_length', 1 if kwargs.get('required', False) else None) self.max_length = kwargs.pop('max_length', None) @@ -262,6 +322,10 @@ def validate(self, value): class Integer(Column): + """ + Stores a 32-bit signed integer value + """ + db_type = 'int' def validate(self, value): @@ -280,10 +344,16 @@ def to_database(self, value): class BigInt(Integer): + """ + Stores a 64-bit signed long value + """ db_type = 'bigint' class VarInt(Column): + """ + Stores an arbitrary-precision integer + """ db_type = 'varint' def validate(self, value): @@ -311,6 +381,9 @@ def __init__(self, instance, column, value): class Counter(Integer): + """ + Stores a counter that can be inremented and decremented + """ db_type = 'counter' value_manager = CounterValueManager @@ -330,6 +403,9 @@ def __init__(self, class DateTime(Column): + """ + Stores a datetime value + """ db_type = 'timestamp' def to_python(self, value): @@ -358,6 +434,12 @@ def to_database(self, value): class Date(Column): + """ + *Note: this type is overloaded, and will likely be changed or removed to accommodate distinct date type + in a future version* + + Stores a date value, with no time-of-day + """ db_type = 'timestamp' def to_python(self, value): @@ -384,7 +466,7 @@ def to_database(self, value): class UUID(Column): """ - Type 1 or 4 UUID + Stores a type 1 or 4 UUID """ db_type = 'uuid' @@ -451,6 +533,9 @@ def from_datetime(self, dt): class Boolean(Column): + """ + Stores a boolean True or False value + """ db_type = 'boolean' def validate(self, value): @@ -467,6 +552,9 @@ def to_python(self, value): class Float(Column): + """ + Stores a 32-bit floating point value + """ db_type = 'double' def __init__(self, double_precision=True, **kwargs): @@ -489,6 +577,9 @@ def to_database(self, value): class Decimal(Column): + """ + Stores a variable precision decimal value + """ db_type = 'decimal' def validate(self, value): diff --git 
a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 019d6beb94..298e9d930d 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -274,14 +274,12 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass # _len is lazily created by __len__ - # table names will be generated automatically from it's model - # however, you can also define them manually here __table_name__ = None - # the keyspace for this model __keyspace__ = None - # polymorphism options + __default_ttl__ = None + __polymorphic_key__ = None # compaction options @@ -304,9 +302,9 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass __queryset__ = ModelQuerySet __dmlquery__ = DMLQuery - __default_ttl__ = None # default ttl value to use __consistency__ = None # can be set per query + # Additional table properties __bloom_filter_fp_chance__ = None __caching__ = None @@ -429,7 +427,9 @@ def _can_update(self): @classmethod def _get_keyspace(cls): - """ Returns the manual keyspace, if set, otherwise the default keyspace """ + """ + Returns the manual keyspace, if set, otherwise the default keyspace + """ return cls.__keyspace__ or DEFAULT_KEYSPACE @classmethod @@ -489,7 +489,9 @@ def column_family_name(cls, include_keyspace=True): return '{}.{}'.format(cls._get_keyspace(), cf_name) def validate(self): - """ Cleans and validates the field values """ + """ + Cleans and validates the field values + """ for name, col in self._columns.items(): v = getattr(self, name) if v is None and not self._values[name].explicit and col.has_default: @@ -521,7 +523,9 @@ def __setitem__(self, key, val): return setattr(self, key, val) def __len__(self): - """ Returns the number of columns defined on that model. """ + """ + Returns the number of columns defined on that model. + """ try: return self._len except: @@ -529,15 +533,15 @@ def __len__(self): return self._len def keys(self): - """ Returns list of column's IDs. """ + """ Returns a list of column IDs. 
""" return [k for k in self] def values(self): - """ Returns list of column's values. """ + """ Returns list of column values. """ return [self[k] for k in self] def items(self): - """ Returns a list of columns's IDs/values. """ + """ Returns a list of column ID/value tuples. """ return [(k, self[k]) for k in self] def _as_dict(self): @@ -549,6 +553,13 @@ def _as_dict(self): @classmethod def create(cls, **kwargs): + """ + Create an instance of this model in the database. + + Takes the model column values as keyword arguments. + + Returns the instance. + """ extra_columns = set(kwargs.keys()) - set(cls._columns.keys()) if extra_columns: raise ValidationError("Incorrect columns passed: {}".format(extra_columns)) @@ -556,25 +567,52 @@ def create(cls, **kwargs): @classmethod def all(cls): + """ + Returns a queryset representing all stored objects + + This is a pass-through to the model objects().all() + """ return cls.objects.all() @classmethod def filter(cls, *args, **kwargs): - # if kwargs.values().count(None): - # raise CQLEngineException("Cannot pass None as a filter") + """ + Returns a queryset based on filter parameters. + This is a pass-through to the model objects().:method:`~cqlengine.queries.filter`. + """ return cls.objects.filter(*args, **kwargs) @classmethod def get(cls, *args, **kwargs): + """ + Returns a single object based on the passed filter constraints. + + This is a pass-through to the model objects().:method:`~cqlengine.queries.get`. + """ return cls.objects.get(*args, **kwargs) def timeout(self, timeout): + """ + Sets a timeout for use in :meth:`~.save`, :meth:`~.update`, and :meth:`~.delete` + operations + """ assert self._batch is None, 'Setting both timeout and batch is not supported' self._timeout = timeout return self def save(self): + """ + Saves an object to the database. + + .. 
code-block:: python + + #create a person instance + person = Person(first_name='Kimberly', last_name='Eggleston') + #saves it to Cassandra + person.save() + """ + # handle polymorphic models if self._is_polymorphic: if self._is_polymorphic_base: @@ -604,6 +642,15 @@ def save(self): return self def update(self, **values): + """ + Performs an update on the model instance. You can pass in values to set on the model + for updating, or you can call without values to execute an update against any modified + fields. If no fields on the model have been modified since loading, no query will be + performed. Model validation is performed normally. + + It is possible to do a blind update, that is, to update a field without having first selected the object out of the database. + See :ref:`Blind Updates ` + """ for k, v in values.items(): col = self._columns.get(k) @@ -644,7 +691,9 @@ def update(self, **values): return self def delete(self): - """ Deletes this instance """ + """ + Deletes the object from the database + """ self.__dmlquery__(self.__class__, self, batch=self._batch, timestamp=self._timestamp, @@ -652,7 +701,9 @@ def delete(self): timeout=self._timeout).delete() def get_changed_columns(self): - """ returns a list of the columns that have been updated since instantiation or save """ + """ + Returns a list of the columns that have been updated since instantiation or save + """ return [k for k,v in self._values.items() if v.changed] @classmethod @@ -668,12 +719,9 @@ def _inst_batch(self, batch): batch = hybrid_classmethod(_class_batch, _inst_batch) - class ModelMetaClass(type): def __new__(cls, name, bases, attrs): - """ - """ #move column definitions into columns dict #and set default column names column_dict = OrderedDict() @@ -839,11 +887,30 @@ def _get_polymorphic_base(bases): @six.add_metaclass(ModelMetaClass) class Model(BaseModel): + __abstract__ = True """ - the db name for the column family can be set as the attribute db_name, or - it will be generated from 
the class name + *Optional.* Indicates that this model is only intended to be used as a base class for other models. + You can't create tables for abstract models, but checks around schema validity are skipped during class construction. + """ + + __table_name__ = None + """ + *Optional.* Sets the name of the CQL table for this model. If left blank, the table name will be the name of the model, with it's module name as it's prefix. Manually defined table names are not inherited. """ - __abstract__ = True - # __metaclass__ = ModelMetaClass + __keyspace__ = None + """ + Sets the name of the keyspace used by this model. + """ + __default_ttl__ = None + """ + *Optional* The default ttl used by this model. + + This can be overridden by using the :meth:`~.ttl` method. + """ + + __polymorphic_key__ = None + """ + *Optional* Specifies a value for the polymorphic key when using model inheritance. + """ diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 3acb007db6..2dcef0189b 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -379,9 +379,9 @@ def _get_result_constructor(self): def batch(self, batch_obj): """ - Adds a batch query to the mix - :param batch_obj: - :return: + Set a batch object to run the query on. + + Note: running a select query with a batch object will raise an exception """ if batch_obj is not None and not isinstance(batch_obj, BatchQuery): raise CQLEngineException('batch_obj must be a BatchQuery instance or None') @@ -396,9 +396,25 @@ def first(self): return None def all(self): + """ + Returns a queryset matching all rows + + .. code-block:: python + + for user in User.objects().all(): + print(user) + """ return copy.deepcopy(self) def consistency(self, consistency): + """ + Sets the consistency level for the operation. See :class:`.ConsistencyLevel`. + + .. 
code-block:: python + + for user in User.objects(id=3).consistency(CL.ONE): + print(user) + """ clone = copy.deepcopy(self) clone._consistency = consistency return clone @@ -464,9 +480,9 @@ def filter(self, *args, **kwargs): """ Adds WHERE arguments to the queryset, returning a new queryset - #TODO: show examples + See :ref:`retrieving-objects-with-filters` - :rtype: AbstractQuerySet + Returns a QuerySet filtered on the keyword arguments """ #add arguments to the where clause filters if len([x for x in kwargs.values() if x is None]): @@ -524,8 +540,17 @@ def get(self, *args, **kwargs): """ Returns a single instance matching this query, optionally with additional filter kwargs. - A DoesNotExistError will be raised if there are no rows matching the query - A MultipleObjectsFoundError will be raised if there is more than one row matching the queyr + See :ref:`retrieving-objects-with-filters` + + Returns a single object matching the QuerySet. + + .. code-block:: python + + user = User.get(id=1) + + If no objects are matched, a :class:`~.DoesNotExist` exception is raised. + + If more than one object is found, a :class:`~.MultipleObjectsReturned` exception is raised. """ if args or kwargs: return self.filter(*args, **kwargs).get() @@ -547,10 +572,34 @@ def _get_ordering_condition(self, colname): def order_by(self, *colnames): """ - orders the result set. - ordering can only use clustering columns. + Sets the column(s) to be used for ordering + + Default order is ascending, prepend a '-' to any column name for descending + + *Note: column names must be a clustering key* + + .. 
code-block:: python - Default order is ascending, prepend a '-' to the column name for descending + from uuid import uuid1,uuid4 + + class Comment(Model): + photo_id = UUID(primary_key=True) + comment_id = TimeUUID(primary_key=True, default=uuid1) # second primary key component is a clustering key + comment = Text() + + sync_table(Comment) + + u = uuid4() + for x in range(5): + Comment.create(photo_id=u, comment="test %d" % x) + + print("Normal") + for comment in Comment.objects(photo_id=u): + print comment.comment_id + + print("Reversed") + for comment in Comment.objects(photo_id=u).order_by("-comment_id"): + print comment.comment_id """ if len(colnames) == 0: clone = copy.deepcopy(self) @@ -566,7 +615,9 @@ def order_by(self, *colnames): return clone def count(self): - """ Returns the number of rows matched by this query """ + """ + Returns the number of rows matched by this query + """ if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") @@ -580,8 +631,14 @@ def count(self): def limit(self, v): """ - Sets the limit on the number of results returned - CQL has a default limit of 10,000 + Limits the number of results returned by Cassandra. + + *Note that CQL's default limit is 10,000, so all queries without a limit set explicitly will have an implicit limit of 10,000* + + .. 
code-block:: python + + for user in User.objects().limit(100): + print(user) """ if not (v is None or isinstance(v, six.integer_types)): raise TypeError @@ -597,8 +654,7 @@ def limit(self, v): def allow_filtering(self): """ - Enables the unwise practive of querying on a clustering - key without also defining a partition key + Enables the (usually) unwise practice of querying on a clustering key without also defining a partition key """ clone = copy.deepcopy(self) clone._allow_filtering = True @@ -700,7 +756,6 @@ def _construct_instance(values): class ModelQuerySet(AbstractQuerySet): """ - """ def _validate_select_where(self): """ Checks that a filterset will not create invalid select statement """ @@ -774,11 +829,19 @@ def values_list(self, *fields, **kwargs): return clone def ttl(self, ttl): + """ + Sets the ttl (in seconds) for modified data. + + *Note that running a select query with a ttl value will raise an exception* + """ clone = copy.deepcopy(self) clone._ttl = ttl return clone def timestamp(self, timestamp): + """ + Allows for custom timestamps to be saved with the record. + """ clone = copy.deepcopy(self) clone._timestamp = timestamp return clone @@ -791,7 +854,92 @@ def if_not_exists(self): return clone def update(self, **values): - """ Updates the rows in this queryset """ + """ + Performs an update on the row selected by the queryset. Include values to update in the + update like so: + + .. code-block:: python + + Model.objects(key=n).update(value='x') + + Passing in updates for columns which are not part of the model will raise a ValidationError. + + Per column validation will be performed, but instance level validation will not + (i.e., `Model.validate` is not called). This is sometimes referred to as a blind update. + + For example: + + .. 
code-block:: python + + class User(Model): + id = Integer(primary_key=True) + name = Text() + + setup(["localhost"], "test") + sync_table(User) + + u = User.create(id=1, name="jon") + + User.objects(id=1).update(name="Steve") + + # sets name to null + User.objects(id=1).update(name=None) + + + Also supported is blindly adding and removing elements from container columns, + without loading a model instance from Cassandra. + + Using the syntax `.update(column_name={x, y, z})` will overwrite the contents of the container, like updating a + non container column. However, adding `__` to the end of the keyword arg makes the update call add + or remove items from the collection, without overwriting the entire column. + + Given the model below, here are the operations that can be performed on the different container columns: + + .. code-block:: python + + class Row(Model): + row_id = columns.Integer(primary_key=True) + set_column = columns.Set(Integer) + list_column = columns.List(Integer) + map_column = columns.Map(Integer, Integer) + + :class:`~cqlengine.columns.Set` + + - `add`: adds the elements of the given set to the column + - `remove`: removes the elements of the given set from the column + + + .. code-block:: python + + # add elements to a set + Row.objects(row_id=5).update(set_column__add={6}) + + # remove elements from a set + Row.objects(row_id=5).update(set_column__remove={4}) + + :class:`~cqlengine.columns.List` + + - `append`: appends the elements of the given list to the end of the column + - `prepend`: prepends the elements of the given list to the beginning of the column + + .. code-block:: python + + # append items to a list + Row.objects(row_id=5).update(list_column__append=[6, 7]) + + # prepend items to a list + Row.objects(row_id=5).update(list_column__prepend=[1, 2]) + + + :class:`~cqlengine.columns.Map` + + - `update`: adds the given keys/values to the columns, creating new entries if they didn't exist, and overwriting old ones if they did + + .. 
code-block:: python + + # add items to a map + Row.objects(row_id=5).update(map_column__update={1: 2, 3: 4}) + """ if not values: return diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index b1b1aebb0a..6532c9a026 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -5,6 +5,8 @@ .. autoclass:: Cluster ([contact_points=('127.0.0.1',)][, port=9042][, executor_threads=2], **attr_kwargs) + Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. + .. autoattribute:: cql_version .. autoattribute:: protocol_version diff --git a/docs/api/cassandra/cqlengine/columns.rst b/docs/api/cassandra/cqlengine/columns.rst new file mode 100644 index 0000000000..6148dc67b1 --- /dev/null +++ b/docs/api/cassandra/cqlengine/columns.rst @@ -0,0 +1,81 @@ +``cassandra.cqlengine.columns`` - Column types for object mapping models +======================================================================== + +.. module:: cassandra.cqlengine.columns + +Columns +------- + + Columns in your models map to columns in your CQL table. You define CQL columns by defining column attributes on your model classes. + For a model to be valid it needs at least one primary key column and one non-primary key column. + + Just as in CQL, the order you define your columns in is important, and is the same order they are defined in on a model's corresponding table. + + Each column on your model definitions needs to be an instance of a Column class. + +.. autoclass:: Column(**kwargs) + + .. autoattribute:: primary_key + + .. autoattribute:: primary_key + + .. autoattribute:: partition_key + + .. autoattribute:: index + + .. autoattribute:: db_field + + .. autoattribute:: default + + .. autoattribute:: required + + .. autoattribute:: clustering_order + + .. autoattribute:: polymorphic_key + + .. 
autoattribute:: static + +Column Types +------------ + +Columns of all types are initialized by passing :class:`.Column` attributes to the constructor by keyword. + +.. autoclass:: Ascii(**kwargs) + +.. autoclass:: BigInt(**kwargs) + +.. autoclass:: Blob(**kwargs) + +.. autoclass:: Bytes(**kwargs) + +.. autoclass:: Boolean(**kwargs) + +.. autoclass:: Date(**kwargs) + +.. autoclass:: DateTime(**kwargs) + +.. autoclass:: Decimal(**kwargs) + +.. autoclass:: Float(**kwargs) + +.. autoclass:: Integer(**kwargs) + +.. autoclass:: List(**kwargs) + +.. autoclass:: Map(**kwargs) + +.. autoclass:: Set(**kwargs) + +.. autoclass:: Text(**kwargs) + + In addition to the :class:Column attributes, ``Text`` columns accept the following attributes for validation: + + .. autoattribute:: min_length + + .. autoattribute:: max_length + +.. autoclass:: TimeUUID(**kwargs) + +.. autoclass:: UUID(**kwargs) + +.. autoclass:: VarInt(**kwargs) diff --git a/docs/api/cassandra/cqlengine/models.rst b/docs/api/cassandra/cqlengine/models.rst new file mode 100644 index 0000000000..585b8d113f --- /dev/null +++ b/docs/api/cassandra/cqlengine/models.rst @@ -0,0 +1,215 @@ +``cassandra.cqlengine.models`` - Table models for object mapping +================================================================ + +.. module:: cassandra.cqlengine.models + +Model +----- +.. autoclass:: Model(\*\*kwargs) + + The initializer creates an instance of the model. Pass in keyword arguments for columns you've defined on the model. + + .. code-block:: python + class Person(Model): + id = columns.UUID(primary_key=True) + first_name = columns.Text() + last_name = columns.Text() + + person = Person(first_name='Blake', last_name='Eggleston') + person.first_name #returns 'Blake' + person.last_name #returns 'Eggleston' + + *Model attributes define how the model maps to tables in the database. These are class variables that should be set + when defining Model deriviatives.* + + .. 
autoattribute:: __abstract__ + :annotation: = False + + .. autoattribute:: __table_name__ + + .. autoattribute:: __keyspace__ + + .. _ttl-change: + .. autoattribute:: __default_ttl__ + + .. autoattribute:: __polymorphic_key__ + + See :ref:`table_polymorphism` for usage examples. + + *Each table can have its own set of configuration options. + These can be specified on a model with the following attributes:* + + See the `list of supported table properties for more information + `_. + + .. attribute:: __bloom_filter_fp_chance + + .. attribute:: __caching__ + + .. attribute:: __comment__ + + .. attribute:: __dclocal_read_repair_chance__ + + .. attribute:: __default_time_to_live__ + + .. attribute:: __gc_grace_seconds__ + + .. attribute:: __index_interval__ + + .. attribute:: __memtable_flush_period_in_ms__ + + .. attribute:: __populate_io_cache_on_flush__ + + .. attribute:: __read_repair_chance__ + + .. attribute:: __replicate_on_write__ + + + *Model presently supports specifying compaction options via class attributes. + cqlengine will only use your compaction options if you have a strategy set. + When a table is synced, it will be altered to match the compaction options set on your table. + This means that if you are changing settings manually they will be changed back on resync.* + + *Do not use the compaction settings of cqlengine if you want to manage your compaction settings manually.* + + *cqlengine supports all compaction options as of Cassandra 1.2.8.* + + **These attibutes will likely be replaced by a single options string in a future version, and are therefore deprecated.** + + .. attribute:: __compaction_bucket_high__ + :annotation: Deprecated + + .. attribute:: __compaction_bucket_low__ + :annotation: Deprecated + + .. attribute:: __compaction_max_compaction_threshold__ + :annotation: Deprecated + + .. attribute:: __compaction_min_compaction_threshold__ + :annotation: Deprecated + + .. 
attribute:: __compaction_min_sstable_size__ + :annotation: Deprecated + + .. attribute:: __compaction_sstable_size_in_mb__ + :annotation: Deprecated + + .. attribute:: __compaction_tombstone_compaction_interval__ + :annotation: Deprecated + + .. attribute:: __compaction_tombstone_threshold__ + :annotation: Deprecated + + For example: + + .. code-block:: python + + class User(Model): + __compaction__ = cqlengine.LeveledCompactionStrategy + __compaction_sstable_size_in_mb__ = 64 + __compaction_tombstone_threshold__ = .2 + + user_id = columns.UUID(primary_key=True) + name = columns.Text() + + or for SizeTieredCompaction: + + .. code-block:: python + + class TimeData(Model): + __compaction__ = SizeTieredCompactionStrategy + __compaction_bucket_low__ = .3 + __compaction_bucket_high__ = 2 + __compaction_min_threshold__ = 2 + __compaction_max_threshold__ = 64 + __compaction_tombstone_compaction_interval__ = 86400 + + Tables may use `LeveledCompactionStrategy` or `SizeTieredCompactionStrategy`. Both options are available in the top level cqlengine module. To reiterate, you will need to set your `__compaction__` option explicitly in order for cqlengine to handle any of your settings. + + *The base methods allow creating, storing, and querying modeled objects.* + + .. automethod:: create + + .. method:: if_not_exists() + + Check the existence of an object before insertion. The existence of an + object is determined by its primary key(s). And please note using this flag + would incur performance cost. + + if the insertion didn't applied, a LWTException exception would be raised. + + .. code-block:: python + + try: + TestIfNotExistsModel.if_not_exists().create(id=id, count=9, text='111111111111') + except LWTException as e: + # handle failure case + print e.existing # existing object + + This method is supported on Cassandra 2.0 or later. + + .. automethod:: save + + .. automethod:: update + + .. 
method:: iff(**values) + + Checks to ensure that the values specified are correct on the Cassandra cluster. + Simply specify the column(s) and the expected value(s). As with if_not_exists, + this incurs a performance cost. + + If the insertion isn't applied, a LWTException is raised + + .. code-block:: python + + t = TestTransactionModel(text='some text', count=5) + try: + t.iff(count=5).update('other text') + except LWTException as e: + # handle failure + + .. automethod:: get + + .. automethod:: filter + + .. automethod:: all + + .. automethod:: delete + + .. method:: batch(batch_object) + + Sets the batch object to run instance updates and inserts queries with. + + See :doc:`/cqlengine/batches` for usage examples + + .. automethod:: timeout + + .. method:: timestamp(timedelta_or_datetime) + + Sets the timestamp for the query + + .. method:: ttl(ttl_in_sec) + + Sets the ttl values to run instance updates and inserts queries with. + + .. automethod:: column_family_name + + Models also support dict-like access: + + .. method:: len(m) + + Returns the number of columns defined in the model + + .. method:: m[col_name] + + Returns the value of column ``col_name`` + + .. method:: m[col_name] = value + + Set ``m[col_name]`` to value + + .. automethod:: keys + + .. automethod:: values + + .. automethod:: items diff --git a/docs/api/cassandra/cqlengine/query.rst b/docs/api/cassandra/cqlengine/query.rst new file mode 100644 index 0000000000..8486011a98 --- /dev/null +++ b/docs/api/cassandra/cqlengine/query.rst @@ -0,0 +1,42 @@ +``cassandra.cqlengine.query`` - Query and filter model objects +================================================================= + +.. module:: cassandra.cqlengine.query + +QuerySet +-------- +QuerySet objects are typically obtained by calling :meth:`~.cassandra.cqlengine.models.Model.objects` on a model class. +The methods here are used to filter, order, and constrain results. + +.. autoclass:: ModelQuerySet + + .. automethod:: all + + .. 
automethod:: batch + + .. automethod:: consistency + + .. automethod:: count + + .. automethod:: filter + + .. automethod:: get + + .. automethod:: limit + + .. automethod:: order_by + + .. automethod:: allow_filtering + + .. automethod:: timestamp + + .. automethod:: ttl + + .. _blind_updates: + + .. automethod:: update + +.. autoclass:: DoesNotExist + +.. autoclass:: MultipleObjectsReturned + diff --git a/docs/api/index.rst b/docs/api/index.rst index 27aebf0f9a..1ee811db4f 100644 --- a/docs/api/index.rst +++ b/docs/api/index.rst @@ -1,6 +1,8 @@ API Documentation ================= +Core Driver +----------- .. toctree:: :maxdepth: 2 @@ -22,3 +24,12 @@ API Documentation cassandra/io/libevreactor cassandra/io/geventreactor cassandra/io/twistedreactor + +Object Mapper +------------- +.. toctree:: + :maxdepth: 1 + + cassandra/cqlengine/models + cassandra/cqlengine/columns + cassandra/cqlengine/query diff --git a/docs/conf.py b/docs/conf.py index b81ee37f2b..939a537dbd 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -55,7 +55,7 @@ release = cassandra.__version__ autodoc_member_order = 'bysource' -autoclass_content = 'both' +autoclass_content = 'class' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/docs/cqlengine/batches.rst b/docs/cqlengine/batches.rst new file mode 100644 index 0000000000..a567e31c27 --- /dev/null +++ b/docs/cqlengine/batches.rst @@ -0,0 +1,108 @@ +============= +Batch Queries +============= + +cqlengine supports batch queries using the BatchQuery class. Batch queries can be started and stopped manually, or within a context manager. To add queries to the batch object, you just need to precede the create/save/delete call with a call to batch, and pass in the batch object. 
+ + +Batch Query General Use Pattern +=============================== + + You can only create, update, and delete rows with a batch query, attempting to read rows out of the database with a batch query will fail. + + .. code-block:: python + + from cqlengine import BatchQuery + + #using a context manager + with BatchQuery() as b: + now = datetime.now() + em1 = ExampleModel.batch(b).create(example_type=0, description="1", created_at=now) + em2 = ExampleModel.batch(b).create(example_type=0, description="2", created_at=now) + em3 = ExampleModel.batch(b).create(example_type=0, description="3", created_at=now) + + # -- or -- + + #manually + b = BatchQuery() + now = datetime.now() + em1 = ExampleModel.batch(b).create(example_type=0, description="1", created_at=now) + em2 = ExampleModel.batch(b).create(example_type=0, description="2", created_at=now) + em3 = ExampleModel.batch(b).create(example_type=0, description="3", created_at=now) + b.execute() + + # updating in a batch + + b = BatchQuery() + em1.description = "new description" + em1.batch(b).save() + em2.description = "another new description" + em2.batch(b).save() + b.execute() + + # deleting in a batch + b = BatchQuery() + ExampleModel.objects(id=some_id).batch(b).delete() + ExampleModel.objects(id=some_id2).batch(b).delete() + b.execute() + + + Typically you will not want the block to execute if an exception occurs inside the `with` block. However, in the case that this is desirable, it's achievable by using the following syntax: + + .. code-block:: python + + with BatchQuery(execute_on_exception=True) as b: + LogEntry.batch(b).create(k=1, v=1) + mystery_function() # exception thrown in here + LogEntry.batch(b).create(k=1, v=2) # this code is never reached due to the exception, but anything leading up to here will execute in the batch. + + If an exception is thrown somewhere in the block, any statements that have been added to the batch will still be executed. This is useful for some logging situations. 
+ +Batch Query Execution Callbacks +=============================== + + In order to allow secondary tasks to be chained to the end of batch, BatchQuery instances allow callbacks to be + registered with the batch, to be executed immediately after the batch executes. + + Multiple callbacks can be attached to same BatchQuery instance, they are executed in the same order that they + are added to the batch. + + The callbacks attached to a given batch instance are executed only if the batch executes. If the batch is used as a + context manager and an exception is raised, the queued up callbacks will not be run. + + .. code-block:: python + + def my_callback(*args, **kwargs): + pass + + batch = BatchQuery() + + batch.add_callback(my_callback) + batch.add_callback(my_callback, 'positional arg', named_arg='named arg value') + + # if you need reference to the batch within the callback, + # just trap it in the arguments to be passed to the callback: + batch.add_callback(my_callback, cqlengine_batch=batch) + + # once the batch executes... + batch.execute() + + # the effect of the above scheduled callbacks will be similar to + my_callback() + my_callback('positional arg', named_arg='named arg value') + my_callback(cqlengine_batch=batch) + + Failure in any of the callbacks does not affect the batch's execution, as the callbacks are started after the execution + of the batch is complete. + +Logged vs Unlogged Batches +--------------------------- + By default, queries in cqlengine are LOGGED, which carries additional overhead from UNLOGGED. To explicitly state which batch type to use, simply: + + + .. 
code-block:: python + + from cqlengine.query import BatchType + with BatchQuery(batch_type=BatchType.Unlogged) as b: + LogEntry.batch(b).create(k=1, v=1) + LogEntry.batch(b).create(k=1, v=2) diff --git a/docs/cqlengine/columns.rst b/docs/cqlengine/columns.rst index 0f71e3c7ad..ac4b19a370 100644 --- a/docs/cqlengine/columns.rst +++ b/docs/cqlengine/columns.rst @@ -2,10 +2,6 @@ Columns ======= -**Users of versions < 0.4, please read this post before upgrading:** `Breaking Changes`_ - -.. _Breaking Changes: https://groups.google.com/forum/?fromgroups#!topic/cqlengine-users/erkSNe1JwuU - .. module:: cqlengine.columns .. class:: Bytes() diff --git a/docs/cqlengine/connection.rst b/docs/cqlengine/connection.rst index 658b3fc439..40a685b688 100644 --- a/docs/cqlengine/connection.rst +++ b/docs/cqlengine/connection.rst @@ -2,10 +2,6 @@ Connection ========== -**Users of versions < 0.4, please read this post before upgrading:** `Breaking Changes`_ - -.. _Breaking Changes: https://groups.google.com/forum/?fromgroups#!topic/cqlengine-users/erkSNe1JwuU - .. module:: cqlengine.connection The setup function in `cqlengine.connection` records the Cassandra servers to connect to. diff --git a/docs/cqlengine/models.rst b/docs/cqlengine/models.rst index bd50d95c05..aafd31d146 100644 --- a/docs/cqlengine/models.rst +++ b/docs/cqlengine/models.rst @@ -2,25 +2,26 @@ Models ====== -**Users of versions < 0.4, please read this post before upgrading:** `Breaking Changes`_ - -.. _Breaking Changes: https://groups.google.com/forum/?fromgroups#!topic/cqlengine-users/erkSNe1JwuU +.. module:: cqlengine.models -.. module:: cqlengine.connection +A model is a python class representing a CQL table. Models derive from :class:`Model`, and +define basic table properties and columns for a table. -.. module:: cqlengine.models +Columns in your models map to columns in your CQL table. You define CQL columns by defining column attributes on your model classes. 
+For a model to be valid it needs at least one primary key column and one non-primary key column. Just as in CQL, the order you define +your columns in is important, and is the same order they are defined in on a model's corresponding table. -A model is a python class representing a CQL table. +Some basic examples defining models are shown below. Consult the :doc:`Model API docs ` and :doc:`Column API docs ` for complete details. -Examples -======== +Example Definitions +=================== -This example defines a Person table, with the columns ``first_name`` and ``last_name`` +This example defines a ``Person`` table, with the columns ``first_name`` and ``last_name`` .. code-block:: python - from cqlengine import columns - from cqlengine.models import Model + from cassandra.cqlengine import columns + from cassandra.cqlengine.models import Model class Person(Model): id = columns.UUID(primary_key=True) @@ -37,14 +38,14 @@ The Person model would create this CQL table: first_name text, last_name text, PRIMARY KEY (id) - ) + ); Here's an example of a comment table created with clustering keys, in descending order: .. code-block:: python - from cqlengine import columns - from cqlengine.models import Model + from cassandra.cqlengine import columns + from cassandra.cqlengine.models import Model class Comment(Model): photo_id = columns.UUID(primary_key=True) @@ -60,418 +61,122 @@ The Comment model's ``create table`` would look like the following: comment_id timeuuid, comment text, PRIMARY KEY (photo_id, comment_id) - ) WITH CLUSTERING ORDER BY (comment_id DESC) + ) WITH CLUSTERING ORDER BY (comment_id DESC); -To sync the models to the database, you may do the following: +To sync the models to the database, you may do the following*: .. 
code-block:: python - from cqlengine.management import sync_table + from cassandra.cqlengine.management import sync_table sync_table(Person) sync_table(Comment) +\*Note: synchronizing models causes schema changes, and should be done with caution. +Please see the discussion in :doc:`manage_schemas` for considerations. -Columns -======= - - Columns in your models map to columns in your CQL table. You define CQL columns by defining column attributes on your model classes. For a model to be valid it needs at least one primary key column and one non-primary key column. - - Just as in CQL, the order you define your columns in is important, and is the same order they are defined in on a model's corresponding table. - -Column Types -============ - - Each column on your model definitions needs to an instance of a Column class. The column types that are included with cqlengine as of this writing are: - - * :class:`~cqlengine.columns.Bytes` - * :class:`~cqlengine.columns.Ascii` - * :class:`~cqlengine.columns.Text` - * :class:`~cqlengine.columns.Integer` - * :class:`~cqlengine.columns.BigInt` - * :class:`~cqlengine.columns.DateTime` - * :class:`~cqlengine.columns.UUID` - * :class:`~cqlengine.columns.TimeUUID` - * :class:`~cqlengine.columns.Boolean` - * :class:`~cqlengine.columns.Float` - * :class:`~cqlengine.columns.Decimal` - * :class:`~cqlengine.columns.Set` - * :class:`~cqlengine.columns.List` - * :class:`~cqlengine.columns.Map` - -Column Options --------------- - - Each column can be defined with optional arguments to modify the way they behave. While some column types may - define additional column options, these are the options that are available on all columns: - - :attr:`~cqlengine.columns.BaseColumn.primary_key` - If True, this column is created as a primary key field. A model can have multiple primary keys. Defaults to False. - - *In CQL, there are 2 types of primary keys: partition keys and clustering keys. 
As with CQL, the first - primary key is the partition key, and all others are clustering keys, unless partition keys are specified - manually using* :attr:`~cqlengine.columns.BaseColumn.partition_key` - - :attr:`~cqlengine.columns.BaseColumn.partition_key` - If True, this column is created as partition primary key. There may be many partition keys defined, - forming a *composite partition key* - - :attr:`~cqlengine.columns.BaseColumn.clustering_order` - ``ASC`` or ``DESC``, determines the clustering order of a clustering key. - - :attr:`~cqlengine.columns.BaseColumn.index` - If True, an index will be created for this column. Defaults to False. - - :attr:`~cqlengine.columns.BaseColumn.db_field` - Explicitly sets the name of the column in the database table. If this is left blank, the column name will be - the same as the name of the column attribute. Defaults to None. - - :attr:`~cqlengine.columns.BaseColumn.default` - The default value for this column. If a model instance is saved without a value for this column having been - defined, the default value will be used. This can be either a value or a callable object (ie: datetime.now is a valid default argument). - Callable defaults will be called each time a default is assigned to a None value - - :attr:`~cqlengine.columns.BaseColumn.required` - If True, this model cannot be saved without a value defined for this column. Defaults to False. Primary key fields always require values. - - :attr:`~cqlengine.columns.BaseColumn.static` - Defined a column as static. Static columns are shared by all rows in a partition. - -Model Methods -============= - Below are the methods that can be called on model instances. - -.. class:: Model(\*\*values) - - Creates an instance of the model. Pass in keyword arguments for columns you've defined on the model. - - *Example* - - .. 
code-block:: python - - #using the person model from earlier: - class Person(Model): - id = columns.UUID(primary_key=True) - first_name = columns.Text() - last_name = columns.Text() - - person = Person(first_name='Blake', last_name='Eggleston') - person.first_name #returns 'Blake' - person.last_name #returns 'Eggleston' - - - .. method:: save() - - Saves an object to the database - - *Example* - - .. code-block:: python - - #create a person instance - person = Person(first_name='Kimberly', last_name='Eggleston') - #saves it to Cassandra - person.save() - - .. method:: delete() - - Deletes the object from the database. - - .. method:: batch(batch_object) - - Sets the batch object to run instance updates and inserts queries with. - - .. method:: timestamp(timedelta_or_datetime) - - Sets the timestamp for the query - - .. method:: ttl(ttl_in_sec) - - Sets the ttl values to run instance updates and inserts queries with. - - .. method:: if_not_exists() - - Check the existence of an object before insertion. The existence of an - object is determined by its primary key(s). And please note using this flag - would incur performance cost. - - if the insertion didn't applied, a LWTException exception would be raised. - - *Example* - - .. code-block:: python - try: - TestIfNotExistsModel.if_not_exists().create(id=id, count=9, text='111111111111') - except LWTException as e: - # handle failure case - print e.existing # existing object - - This method is supported on Cassandra 2.0 or later. - - .. method:: iff(**values) - - Checks to ensure that the values specified are correct on the Cassandra cluster. - Simply specify the column(s) and the expected value(s). As with if_not_exists, - this incurs a performance cost. - - If the insertion isn't applied, a LWTException is raised - - .. code-block:: - t = TestTransactionModel(text='some text', count=5) - try: - t.iff(count=5).update('other text') - except LWTException as e: - # handle failure - - .. 
method:: update(**values) - - Performs an update on the model instance. You can pass in values to set on the model - for updating, or you can call without values to execute an update against any modified - fields. If no fields on the model have been modified since loading, no query will be - performed. Model validation is performed normally. - - It is possible to do a blind update, that is, to update a field without having first selected the object out of the database. See :ref:`Blind Updates ` - - .. method:: get_changed_columns() - - Returns a list of column names that have changed since the model was instantiated or saved - -Model Attributes -================ - - .. attribute:: Model.__abstract__ - - *Optional.* Indicates that this model is only intended to be used as a base class for other models. You can't create tables for abstract models, but checks around schema validity are skipped during class construction. - - .. attribute:: Model.__table_name__ - - *Optional.* Sets the name of the CQL table for this model. If left blank, the table name will be the name of the model, with it's module name as it's prefix. Manually defined table names are not inherited. - - .. _keyspace-change: - .. attribute:: Model.__keyspace__ - - Sets the name of the keyspace used by this model. - - **Prior to cqlengine 0.16, this setting defaulted - to 'cqlengine'. As of 0.16, this field needs to be set on all non-abstract models, or their base classes.** +For examples on manipulating data and creating queries, see :doc:`queryset` - .. _ttl-change: - .. attribute:: Model.__default_ttl__ - - Sets the default ttl used by this model. This can be overridden by using the ``ttl(ttl_in_sec)`` method. - - -Table Polymorphism -================== - - As of cqlengine 0.8, it is possible to save and load different model classes using a single CQL table. - This is useful in situations where you have different object types that you want to store in a single cassandra row. 
- - For instance, suppose you want a table that stores rows of pets owned by an owner: - - .. code-block:: python - - class Pet(Model): - __table_name__ = 'pet' - owner_id = UUID(primary_key=True) - pet_id = UUID(primary_key=True) - pet_type = Text(polymorphic_key=True) - name = Text() - - def eat(self, food): - pass - - def sleep(self, time): - pass - - class Cat(Pet): - __polymorphic_key__ = 'cat' - cuteness = Float() - - def tear_up_couch(self): - pass - - class Dog(Pet): - __polymorphic_key__ = 'dog' - fierceness = Float() - - def bark_all_night(self): - pass - - After calling ``sync_table`` on each of these tables, the columns defined in each model will be added to the - ``pet`` table. Additionally, saving ``Cat`` and ``Dog`` models will save the meta data needed to identify each row - as either a cat or dog. +Manipulating model instances as dictionaries +============================================ - To setup a polymorphic model structure, follow these steps +Model instances can be accessed like dictionaries. - 1. Create a base model with a column set as the polymorphic_key (set ``polymorphic_key=True`` in the column definition) - 2. Create subclass models, and define a unique ``__polymorphic_key__`` value on each - 3. Run ``sync_table`` on each of the sub tables +.. code-block:: python - **About the polymorphic key** + class Person(Model): + first_name = columns.Text() + last_name = columns.Text() - The polymorphic key is what cqlengine uses under the covers to map logical cql rows to the appropriate model type. The - base model maintains a map of polymorphic keys to subclasses. When a polymorphic model is saved, this value is automatically - saved into the polymorphic key column. You can set the polymorphic key column to any column type that you like, with - the exception of container and counter columns, although ``Integer`` columns make the most sense. 
Additionally, if you - set ``index=True`` on your polymorphic key column, you can execute queries against polymorphic subclasses, and a - ``WHERE`` clause will be automatically added to your query, returning only rows of that type. Note that you must - define a unique ``__polymorphic_key__`` value to each subclass, and that you can only assign a single polymorphic - key column per model + kevin = Person.create(first_name="Kevin", last_name="Deldycke") + dict(kevin) # returns {'first_name': 'Kevin', 'last_name': 'Deldycke'} + kevin['first_name'] # returns 'Kevin' + kevin.keys() # returns ['first_name', 'last_name'] + kevin.values() # returns ['Kevin', 'Deldycke'] + kevin.items() # returns [('first_name', 'Kevin'), ('last_name', 'Deldycke')] + kevin['first_name'] = 'KEVIN5000' # changes the model's first name Extending Model Validation ========================== - Each time you save a model instance in cqlengine, the data in the model is validated against the schema you've defined - for your model. Most of the validation is fairly straightforward, it basically checks that you're not trying to do - something like save text into an integer column, and it enforces the ``required`` flag set on column definitions. - It also performs any transformations needed to save the data properly. - - However, there are often additional constraints or transformations you want to impose on your data, beyond simply - making sure that Cassandra won't complain when you try to insert it. To define additional validation on a model, - extend the model's validation method: - - ..
code-block:: python - - class Member(Model): - person_id = UUID(primary_key=True) - name = Text(required=True) - - def validate(self): - super(Member, self).validate() - if self.name == 'jon': - raise ValidationError('no jon\'s allowed') - - *Note*: while not required, the convention is to raise a ``ValidationError`` (``from cqlengine import ValidationError``) - if validation fails - - -Table Properties -================ - - Each table can have its own set of configuration options. - These can be specified on a model with the following attributes: - - .. attribute:: Model.__bloom_filter_fp_chance - - .. attribute:: Model.__caching__ +Each time you save a model instance in cqlengine, the data in the model is validated against the schema you've defined +for your model. Most of the validation is fairly straightforward, it basically checks that you're not trying to do +something like save text into an integer column, and it enforces the ``required`` flag set on column definitions. +It also performs any transformations needed to save the data properly. - .. attribute:: Model.__comment__ +However, there are often additional constraints or transformations you want to impose on your data, beyond simply +making sure that Cassandra won't complain when you try to insert it. To define additional validation on a model, +extend the model's validation method: - .. attribute:: Model.__dclocal_read_repair_chance__ - - .. attribute:: Model.__default_time_to_live__ - - .. attribute:: Model.__gc_grace_seconds__ - - .. attribute:: Model.__index_interval__ - - .. attribute:: Model.__memtable_flush_period_in_ms__ - - .. attribute:: Model.__populate_io_cache_on_flush__ - - .. attribute:: Model.__read_repair_chance__ - - .. attribute:: Model.__replicate_on_write__ - - Example: - - .. 
code-block:: python - - from cqlengine import CACHING_ROWS_ONLY, columns - from cqlengine.models import Model - - class User(Model): - __caching__ = CACHING_ROWS_ONLY # cache only rows instead of keys only by default - __gc_grace_seconds__ = 86400 # 1 day instead of the default 10 days - - user_id = columns.UUID(primary_key=True) - name = columns.Text() - - Will produce the following CQL statement: +.. code-block:: python - .. code-block:: sql + class Member(Model): + person_id = UUID(primary_key=True) + name = Text(required=True) - CREATE TABLE cqlengine.user ( - user_id uuid, - name text, - PRIMARY KEY (user_id) - ) WITH caching = 'rows_only' - AND gc_grace_seconds = 86400; + def validate(self): + super(Member, self).validate() + if self.name == 'jon': + raise ValidationError('no jon\'s allowed') - See the `list of supported table properties for more information - `_. +*Note*: while not required, the convention is to raise a ``ValidationError`` (``from cqlengine import ValidationError``) +if validation fails. +.. _table_polymorphism: -Compaction Options +Table Polymorphism ================== +It is possible to save and load different model classes using a single CQL table. +This is useful in situations where you have different object types that you want to store in a single cassandra row. - As of cqlengine 0.7 we've added support for specifying compaction options. cqlengine will only use your compaction options if you have a strategy set. When a table is synced, it will be altered to match the compaction options set on your table. This means that if you are changing settings manually they will be changed back on resync. Do not use the compaction settings of cqlengine if you want to manage your compaction settings manually. - - cqlengine supports all compaction options as of Cassandra 1.2.8. +For instance, suppose you want a table that stores rows of pets owned by an owner: - Available Options: - - .. attribute:: Model.__compaction_bucket_high__ - - .. 
attribute:: Model.__compaction_bucket_low__ - - .. attribute:: Model.__compaction_max_compaction_threshold__ - - .. attribute:: Model.__compaction_min_compaction_threshold__ - - .. attribute:: Model.__compaction_min_sstable_size__ - - .. attribute:: Model.__compaction_sstable_size_in_mb__ - - .. attribute:: Model.__compaction_tombstone_compaction_interval__ - - .. attribute:: Model.__compaction_tombstone_threshold__ - - For example: - - .. code-block:: python - - class User(Model): - __compaction__ = cqlengine.LeveledCompactionStrategy - __compaction_sstable_size_in_mb__ = 64 - __compaction_tombstone_threshold__ = .2 - - user_id = columns.UUID(primary_key=True) - name = columns.Text() +.. code-block:: python - or for SizeTieredCompaction: + class Pet(Model): + __table_name__ = 'pet' + owner_id = UUID(primary_key=True) + pet_id = UUID(primary_key=True) + pet_type = Text(polymorphic_key=True) + name = Text() - .. code-block:: python + def eat(self, food): + pass - class TimeData(Model): - __compaction__ = SizeTieredCompactionStrategy - __compaction_bucket_low__ = .3 - __compaction_bucket_high__ = 2 - __compaction_min_threshold__ = 2 - __compaction_max_threshold__ = 64 - __compaction_tombstone_compaction_interval__ = 86400 + def sleep(self, time): + pass - Tables may use `LeveledCompactionStrategy` or `SizeTieredCompactionStrategy`. Both options are available in the top level cqlengine module. To reiterate, you will need to set your `__compaction__` option explicitly in order for cqlengine to handle any of your settings. + class Cat(Pet): + __polymorphic_key__ = 'cat' + cuteness = Float() + def tear_up_couch(self): + pass -Manipulating model instances as dictionaries -============================================ + class Dog(Pet): + __polymorphic_key__ = 'dog' + fierceness = Float() - As of cqlengine 0.12, we've added support for treating model instances like dictionaries. See below for examples. + def bark_all_night(self): + pass - .. 
code-block:: python +After calling ``sync_table`` on each of these tables, the columns defined in each model will be added to the +``pet`` table. Additionally, saving ``Cat`` and ``Dog`` models will save the meta data needed to identify each row +as either a cat or dog. - class Person(Model): - first_name = columns.Text() - last_name = columns.Text() +To setup a polymorphic model structure, follow these steps - kevin = Person.create(first_name="Kevin", last_name="Deldycke") - dict(kevin) # returns {'first_name': 'Kevin', 'last_name': 'Deldycke'} - kevin['first_name'] # returns 'Kevin' - kevin.keys() # returns ['first_name', 'last_name'] - kevin.values() # returns ['Kevin', 'Deldycke'] - kevin.items() # returns [('first_name', 'Kevin'), ('last_name', 'Deldycke')] +1. Create a base model with a column set as the polymorphic_key (set ``polymorphic_key=True`` in the column definition) +2. Create subclass models, and define a unique ``__polymorphic_key__`` value on each +3. Run ``sync_table`` on each of the sub tables - kevin['first_name'] = 'KEVIN5000' # changes the models first name +**About the polymorphic key** +The polymorphic key is what cqlengine uses under the covers to map logical cql rows to the appropriate model type. The +base model maintains a map of polymorphic keys to subclasses. When a polymorphic model is saved, this value is automatically +saved into the polymorphic key column. You can set the polymorphic key column to any column type that you like, with +the exception of container and counter columns, although ``Integer`` columns make the most sense. Additionally, if you +set ``index=True`` on your polymorphic key column, you can execute queries against polymorphic subclasses, and a +``WHERE`` clause will be automatically added to your query, returning only rows of that type. 
Note that you must +define a unique ``__polymorphic_key__`` value to each subclass, and that you can only assign a single polymorphic +key column per model diff --git a/docs/cqlengine/queryset.rst b/docs/cqlengine/queryset.rst index 79c07e428e..b88c908aac 100644 --- a/docs/cqlengine/queryset.rst +++ b/docs/cqlengine/queryset.rst @@ -2,13 +2,7 @@ Making Queries ============== -**Users of versions < 0.4, please read this post before upgrading:** `Breaking Changes`_ - -.. _Breaking Changes: https://groups.google.com/forum/?fromgroups#!topic/cqlengine-users/erkSNe1JwuU - -.. module:: cqlengine.connection - -.. module:: cqlengine.query +.. module:: cqlengine.queryset Retrieving objects ================== @@ -278,320 +272,6 @@ Values Lists values = list(TestModel.objects.values_list('clustering_key', flat=True)) # [19L, 18L, 17L, 16L, 15L, 14L, 13L, 12L, 11L, 10L, 9L, 8L, 7L, 6L, 5L, 4L, 3L, 2L, 1L, 0L] - - -Batch Queries -============= - - cqlengine now supports batch queries using the BatchQuery class. Batch queries can be started and stopped manually, or within a context manager. To add queries to the batch object, you just need to precede the create/save/delete call with a call to batch, and pass in the batch object. - - - -Batch Query General Use Pattern -------------------------------- - - You can only create, update, and delete rows with a batch query, attempting to read rows out of the database with a batch query will fail. - - .. 
code-block:: python - - from cqlengine import BatchQuery - - #using a context manager - with BatchQuery() as b: - now = datetime.now() - em1 = ExampleModel.batch(b).create(example_type=0, description="1", created_at=now) - em2 = ExampleModel.batch(b).create(example_type=0, description="2", created_at=now) - em3 = ExampleModel.batch(b).create(example_type=0, description="3", created_at=now) - - # -- or -- - - #manually - b = BatchQuery() - now = datetime.now() - em1 = ExampleModel.batch(b).create(example_type=0, description="1", created_at=now) - em2 = ExampleModel.batch(b).create(example_type=0, description="2", created_at=now) - em3 = ExampleModel.batch(b).create(example_type=0, description="3", created_at=now) - b.execute() - - # updating in a batch - - b = BatchQuery() - em1.description = "new description" - em1.batch(b).save() - em2.description = "another new description" - em2.batch(b).save() - b.execute() - - # deleting in a batch - b = BatchQuery() - ExampleModel.objects(id=some_id).batch(b).delete() - ExampleModel.objects(id=some_id2).batch(b).delete() - b.execute() - - - Typically you will not want the block to execute if an exception occurs inside the `with` block. However, in the case that this is desirable, it's achievable by using the following syntax: - - .. code-block:: python - - with BatchQuery(execute_on_exception=True) as b: - LogEntry.batch(b).create(k=1, v=1) - mystery_function() # exception thrown in here - LogEntry.batch(b).create(k=1, v=2) # this code is never reached due to the exception, but anything leading up to here will execute in the batch. - - If an exception is thrown somewhere in the block, any statements that have been added to the batch will still be executed. This is useful for some logging situations. 
- -Batch Query Execution Callbacks -------------------------------- - - In order to allow secondary tasks to be chained to the end of batch, BatchQuery instances allow callbacks to be - registered with the batch, to be executed immediately after the batch executes. - - Multiple callbacks can be attached to same BatchQuery instance, they are executed in the same order that they - are added to the batch. - - The callbacks attached to a given batch instance are executed only if the batch executes. If the batch is used as a - context manager and an exception is raised, the queued up callbacks will not be run. - - .. code-block:: python - - def my_callback(*args, **kwargs): - pass - - batch = BatchQuery() - - batch.add_callback(my_callback) - batch.add_callback(my_callback, 'positional arg', named_arg='named arg value') - - # if you need reference to the batch within the callback, - # just trap it in the arguments to be passed to the callback: - batch.add_callback(my_callback, cqlengine_batch=batch) - - # once the batch executes... - batch.execute() - - # the effect of the above scheduled callbacks will be similar to - my_callback() - my_callback('positional arg', named_arg='named arg value') - my_callback(cqlengine_batch=batch) - - Failure in any of the callbacks does not affect the batch's execution, as the callbacks are started after the execution - of the batch is complete. - -Logged vs Unlogged Batches ---------------------------- - By default, queries in cqlengine are LOGGED, which carries additional overhead from UNLOGGED. To explicitly state which batch type to use, simply: - - - .. code-block:: python - - from cqlengine.query import BatchType - with BatchQuery(batch_type=BatchType.Unlogged) as b: - LogEntry.batch(b).create(k=1, v=1) - LogEntry.batch(b).create(k=1, v=2) - - - -QuerySet method reference -========================= - -.. class:: QuerySet - - .. method:: all() - - Returns a queryset matching all rows - - .. 
code-block:: python - - for user in User.objects().all(): - print(user) - - - .. method:: batch(batch_object) - - Sets the batch object to run the query on. Note that running a select query with a batch object will raise an exception - - .. method:: consistency(consistency_setting) - - Sets the consistency level for the operation. Options may be imported from the top level :attr:`cqlengine` package. - - .. code-block:: python - - for user in User.objects(id=3).consistency(ONE): - print(user) - - - .. method:: count() - - Returns the number of matching rows in your QuerySet - - .. code-block:: python - - print(User.objects().count()) - - .. method:: filter(\*\*values) - - :param values: See :ref:`retrieving-objects-with-filters` - - Returns a QuerySet filtered on the keyword arguments - - .. method:: get(\*\*values) - - :param values: See :ref:`retrieving-objects-with-filters` - - Returns a single object matching the QuerySet. If no objects are matched, a :attr:`~models.Model.DoesNotExist` exception is raised. If more than one object is found, a :attr:`~models.Model.MultipleObjectsReturned` exception is raised. - - .. code-block:: python - - user = User.get(id=1) - - - .. method:: limit(num) - - Limits the number of results returned by Cassandra. - - *Note that CQL's default limit is 10,000, so all queries without a limit set explicitly will have an implicit limit of 10,000* - - .. code-block:: python - - for user in User.objects().limit(100): - print(user) - - .. method:: order_by(field_name) - - :param field_name: the name of the field to order on. *Note: the field_name must be a clustering key* - :type field_name: string - - Sets the field to order on. - - .. 
code-block:: python - - from uuid import uuid1,uuid4 - - class Comment(Model): - photo_id = UUID(primary_key=True) - comment_id = TimeUUID(primary_key=True, default=uuid1) # auto becomes clustering key - comment = Text() - - sync_table(Comment) - - u = uuid4() - for x in range(5): - Comment.create(photo_id=u, comment="test %d" % x) - - print("Normal") - for comment in Comment.objects(photo_id=u): - print comment.comment_id - - print("Reversed") - for comment in Comment.objects(photo_id=u).order_by("-comment_id"): - print comment.comment_id - - - .. method:: allow_filtering() - - Enables the (usually) unwise practive of querying on a clustering key without also defining a partition key - - .. method:: timestamp(timestamp_or_long_or_datetime) - - Allows for custom timestamps to be saved with the record. - - .. method:: ttl(ttl_in_seconds) - - :param ttl_in_seconds: time in seconds in which the saved values should expire - :type ttl_in_seconds: int - - Sets the ttl to run the query query with. Note that running a select query with a ttl value will raise an exception - - .. _blind_updates: - - .. method:: update(**values) - - Performs an update on the row selected by the queryset. Include values to update in the - update like so: - - .. code-block:: python - - Model.objects(key=n).update(value='x') - - Passing in updates for columns which are not part of the model will raise a ValidationError. - Per column validation will be performed, but instance level validation will not - (`Model.validate` is not called). This is sometimes referred to as a blind update. - - For example: - - .. 
code-block:: python - - class User(Model): - id = Integer(primary_key=True) - name = Text() - - setup(["localhost"], "test") - sync_table(User) - - u = User.create(id=1, name="jon") - - User.objects(id=1).update(name="Steve") - - # sets name to null - User.objects(id=1).update(name=None) - - - The queryset update method also supports blindly adding and removing elements from container columns, without - loading a model instance from Cassandra. - - Using the syntax `.update(column_name={x, y, z})` will overwrite the contents of the container, like updating a - non container column. However, adding `__` to the end of the keyword arg, makes the update call add - or remove items from the collection, without overwriting then entire column. - - - Given the model below, here are the operations that can be performed on the different container columns: - - .. code-block:: python - - class Row(Model): - row_id = columns.Integer(primary_key=True) - set_column = columns.Set(Integer) - list_column = columns.Set(Integer) - map_column = columns.Set(Integer, Integer) - - :class:`~cqlengine.columns.Set` - - - `add`: adds the elements of the given set to the column - - `remove`: removes the elements of the given set to the column - - - .. code-block:: python - - # add elements to a set - Row.objects(row_id=5).update(set_column__add={6}) - - # remove elements to a set - Row.objects(row_id=5).update(set_column__remove={4}) - - :class:`~cqlengine.columns.List` - - - `append`: appends the elements of the given list to the end of the column - - `prepend`: prepends the elements of the given list to the beginning of the column - - .. 
code-block:: python - - # append items to a list - Row.objects(row_id=5).update(list_column__append=[6, 7]) - - # prepend items to a list - Row.objects(row_id=5).update(list_column__prepend=[1, 2]) - - - :class:`~cqlengine.columns.Map` - - - `update`: adds the given keys/values to the columns, creating new entries if they didn't exist, and overwriting old ones if they did - - .. code-block:: python - - # add items to a map - Row.objects(row_id=5).update(map_column__update={1: 2, 3: 4}) - - Per Query Timeouts =================== diff --git a/docs/index.rst b/docs/index.rst index 06ec3ee609..48ee59cdce 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -18,6 +18,9 @@ Contents :doc:`getting_started` A guide through the first steps of connecting to Cassandra and executing queries. +:doc:`object_mapper` + Introduction to the integrated object mapper, cqlengine + :doc:`api/index` The API documentation. @@ -36,6 +39,19 @@ Contents :doc:`security` An overview of the security features of the driver. +.. toctree:: + :hidden: + + api/index + installation + getting_started + upgrading + performance + query_paging + security + user_defined_types + object_mapper + Getting Help ------------ Please send questions to the `mailing list `_. @@ -50,135 +66,9 @@ Please report any bugs and make any feature requests on the If you would like to contribute, please feel free to open a pull request. - -.. toctree:: - :hidden: - - api/index - installation - getting_started - upgrading - performance - query_paging - security - user_defined_types - Indices and Tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` - -.. - cqlengine documentation - ======================= - - **Users of versions < 0.16, the default keyspace 'cqlengine' has been removed. 
Please read this before upgrading:** :ref:`Breaking Changes ` - - cqlengine is a Cassandra CQL 3 Object Mapper for Python - - :ref:`getting-started` - - Download - ======== - - `Github `_ - - `PyPi `_ - - Contents: - ========= - - .. toctree:: - :maxdepth: 2 - - topics/models - topics/queryset - topics/columns - topics/connection - topics/manage_schemas - topics/external_resources - topics/related_projects - topics/third_party - topics/development - topics/faq - - .. _getting-started: - - Getting Started - =============== - - .. code-block:: python - - #first, define a model - from cqlengine import columns - from cqlengine import Model - - class ExampleModel(Model): - example_id = columns.UUID(primary_key=True, default=uuid.uuid4) - example_type = columns.Integer(index=True) - created_at = columns.DateTime() - description = columns.Text(required=False) - - #next, setup the connection to your cassandra server(s)... - >>> from cqlengine import connection - - # see http://datastax.github.io/python-driver/api/cassandra/cluster.html for options - # the list of hosts will be passed to create a Cluster() instance - >>> connection.setup(['127.0.0.1'], "cqlengine") - - # if you're connecting to a 1.2 cluster - >>> connection.setup(['127.0.0.1'], "cqlengine", protocol_version=1) - - #...and create your CQL table - >>> from cqlengine.management import sync_table - >>> sync_table(ExampleModel) - - #now we can create some rows: - >>> em1 = ExampleModel.create(example_type=0, description="example1", created_at=datetime.now()) - >>> em2 = ExampleModel.create(example_type=0, description="example2", created_at=datetime.now()) - >>> em3 = ExampleModel.create(example_type=0, description="example3", created_at=datetime.now()) - >>> em4 = ExampleModel.create(example_type=0, description="example4", created_at=datetime.now()) - >>> em5 = ExampleModel.create(example_type=1, description="example5", created_at=datetime.now()) - >>> em6 = ExampleModel.create(example_type=1, 
description="example6", created_at=datetime.now()) - >>> em7 = ExampleModel.create(example_type=1, description="example7", created_at=datetime.now()) - >>> em8 = ExampleModel.create(example_type=1, description="example8", created_at=datetime.now()) - - #and now we can run some queries against our table - >>> ExampleModel.objects.count() - 8 - >>> q = ExampleModel.objects(example_type=1) - >>> q.count() - 4 - >>> for instance in q: - >>> print instance.description - example5 - example6 - example7 - example8 - - #here we are applying additional filtering to an existing query - #query objects are immutable, so calling filter returns a new - #query object - >>> q2 = q.filter(example_id=em5.example_id) - - >>> q2.count() - 1 - >>> for instance in q2: - >>> print instance.description - example5 - - - `Report a Bug `_ - - `Users Mailing List `_ - - - Indices and tables - >>>>>>> cqlengine/master - ================== - - * :ref:`genindex` - * :ref:`modindex` - * :ref:`search` diff --git a/docs/object_mapper.rst b/docs/object_mapper.rst new file mode 100644 index 0000000000..3695c5e6db --- /dev/null +++ b/docs/object_mapper.rst @@ -0,0 +1,102 @@ +Object Mapper +============= + +cqlengine is the Cassandra CQL 3 Object Mapper packaged with this driver + +:ref:`getting-started` + +Contents +-------- +:doc:`cqlengine/models` + Mapping objects to tables + +:doc:`cqlengine/queryset` + Query sets, filtering + +:doc:`cqlengine/batches` + Batch mutations + +:doc:`cqlengine/connection` + Managing connections + +:doc:`cqlengine/manage_schemas` + +:doc:`cqlengine/external_resources` + +:doc:`cqlengine/related_projects` + +:doc:`cqlengine/third_party` + +:doc:`cqlengine/development` + +:doc:`cqlengine/faq` + +.. toctree:: + :hidden: + + cqlengine/models + cqlengine/queryset + cqlengine/batches + +.. _getting-started: + +Getting Started +--------------- + + .. 
code-block:: python + + import uuid + from cassandra.cqlengine import columns + from cassandra.cqlengine import connection + from datetime import datetime + from cassandra.cqlengine.management import create_keyspace, sync_table + from cassandra.cqlengine.models import Model + + #first, define a model + class ExampleModel(Model): + example_id = columns.UUID(primary_key=True, default=uuid.uuid4) + example_type = columns.Integer(index=True) + created_at = columns.DateTime() + description = columns.Text(required=False) + + #next, setup the connection to your cassandra server(s)... + # see http://datastax.github.io/python-driver/api/cassandra/cluster.html for options + # the list of hosts will be passed to create a Cluster() instance + connection.setup(['127.0.0.1'], "cqlengine", protocol_version=3) + + #...and create your CQL table + >>> sync_table(ExampleModel) + + #now we can create some rows: + >>> em1 = ExampleModel.create(example_type=0, description="example1", created_at=datetime.now()) + >>> em2 = ExampleModel.create(example_type=0, description="example2", created_at=datetime.now()) + >>> em3 = ExampleModel.create(example_type=0, description="example3", created_at=datetime.now()) + >>> em4 = ExampleModel.create(example_type=0, description="example4", created_at=datetime.now()) + >>> em5 = ExampleModel.create(example_type=1, description="example5", created_at=datetime.now()) + >>> em6 = ExampleModel.create(example_type=1, description="example6", created_at=datetime.now()) + >>> em7 = ExampleModel.create(example_type=1, description="example7", created_at=datetime.now()) + >>> em8 = ExampleModel.create(example_type=1, description="example8", created_at=datetime.now()) + + #and now we can run some queries against our table + >>> ExampleModel.objects.count() + 8 + >>> q = ExampleModel.objects(example_type=1) + >>> q.count() + 4 + >>> for instance in q: + >>> print instance.description + example5 + example6 + example7 + example8 + + #here we are applying additional 
filtering to an existing query + #query objects are immutable, so calling filter returns a new + #query object + >>> q2 = q.filter(example_id=em5.example_id) + + >>> q2.count() + 1 + >>> for instance in q2: + >>> print instance.description + example5 From 077b876fdd38af097ef4bc1ea99c86d7f4a5376d Mon Sep 17 00:00:00 2001 From: Tyler Hobbs Date: Fri, 6 Feb 2015 18:53:43 -0600 Subject: [PATCH 1207/3726] Fix index target for collection indexes --- cassandra/cqltypes.py | 15 ++++++++++++ cassandra/metadata.py | 21 +++++++++++++++- tests/integration/standard/test_metadata.py | 27 +++++++++++++++++++++ 3 files changed, 62 insertions(+), 1 deletion(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 8a06560c4c..39be9432aa 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -958,6 +958,21 @@ def serialize_safe(cls, val, protocol_version): return subtype.to_binary(val, protocol_version) +class FrozenType(_ParameterizedType): + typename = "frozen" + num_subtypes = 1 + + @classmethod + def deserialize_safe(cls, byts, protocol_version): + subtype, = cls.subtypes + return subtype.from_binary(byts) + + @classmethod + def serialize_safe(cls, val, protocol_version): + subtype, = cls.subtypes + return subtype.to_binary(val, protocol_version) + + def is_counter_type(t): if isinstance(t, six.string_types): t = lookup_casstype(t) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index f51b9c9323..d13673a886 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1210,11 +1210,30 @@ def as_cql_query(self): """ table = self.column.table if self.index_type != "CUSTOM": + index_target = protect_name(self.column.name) + if self.index_options is not None: + option_keys = self.index_options.keys() + if "index_keys" in option_keys: + index_target = 'keys(%s)' % (index_target,) + elif "index_values" in option_keys: + # don't use any "function" for collection values + pass + else: + # it might be a "full" index on a frozen collection, but + # we 
need to check the data type to verify that, because + # there is no special index option for full-collection + # indexes. + data_type = self.column.data_type + collection_types = ('map', 'set', 'list') + if data_type.typename == "frozen" and data_type.subtypes[0].typename in collection_types: + # no index option for full-collection index + index_target = 'full(%s)' % (index_target,) + return "CREATE INDEX %s ON %s.%s (%s)" % ( self.name, # Cassandra doesn't like quoted index names for some reason protect_name(table.keyspace.name), protect_name(table.name), - protect_name(self.column.name)) + index_target) else: return "CREATE CUSTOM INDEX %s ON %s.%s (%s) USING '%s'" % ( self.name, # Cassandra doesn't like quoted index names for some reason diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 1e46d3c55a..834887d533 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -308,6 +308,33 @@ def test_indexes(self): self.assertIn('CREATE INDEX d_index', statement) self.assertIn('CREATE INDEX e_index', statement) + def test_collection_indexes(self): + self.session.execute("CREATE TABLE %s.%s (a int PRIMARY KEY, b map)" + % (self.ksname, self.cfname)) + self.session.execute("CREATE INDEX index1 ON %s.%s (keys(b))" + % (self.ksname, self.cfname)) + + tablemeta = self.get_table_metadata() + self.assertIn('(keys(b))', tablemeta.export_as_string()) + + self.session.execute("DROP INDEX %s.index1" % (self.ksname,)) + self.session.execute("CREATE INDEX index2 ON %s.%s (b)" + % (self.ksname, self.cfname)) + + tablemeta = self.get_table_metadata() + self.assertIn(' (b)', tablemeta.export_as_string()) + + # test full indexes on frozen collections, if available + if get_server_versions()[0] >= (2, 1, 3): + self.session.execute("DROP TABLE %s.%s" % (self.ksname, self.cfname)) + self.session.execute("CREATE TABLE %s.%s (a int PRIMARY KEY, b frozen>)" + % (self.ksname, 
self.cfname)) + self.session.execute("CREATE INDEX index3 ON %s.%s (full(b))" + % (self.ksname, self.cfname)) + + tablemeta = self.get_table_metadata() + self.assertIn('(full(b))', tablemeta.export_as_string()) + def test_compression_disabled(self): create_statement = self.make_create_statement(["a"], ["b"], ["c"]) create_statement += " WITH compression = {}" From d7a5f0a3f596b4f04c620e77f6c283968bbaa58e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 9 Feb 2015 14:31:39 -0600 Subject: [PATCH 1208/3726] Further reorg of cqlengine documentation --- cassandra/cqlengine/connection.py | 20 +- cassandra/cqlengine/management.py | 48 +++-- docs/api/cassandra/cqlengine/connection.rst | 7 + docs/api/cassandra/cqlengine/management.rst | 15 ++ docs/api/cassandra/cqlengine/models.rst | 1 + docs/api/index.rst | 4 + docs/conf.py | 2 +- docs/cqlengine/columns.rst | 206 -------------------- docs/cqlengine/connection.rst | 27 --- docs/cqlengine/development.rst | 47 ----- docs/cqlengine/external_resources.rst | 22 --- docs/cqlengine/faq.rst | 2 +- docs/cqlengine/manage_schemas.rst | 47 ----- docs/cqlengine/models.rst | 6 +- docs/cqlengine/queryset.rst | 2 +- docs/cqlengine/related_projects.rst | 22 --- docs/object_mapper.rst | 23 +-- 17 files changed, 86 insertions(+), 415 deletions(-) create mode 100644 docs/api/cassandra/cqlengine/connection.rst create mode 100644 docs/api/cassandra/cqlengine/management.rst delete mode 100644 docs/cqlengine/columns.rst delete mode 100644 docs/cqlengine/connection.rst delete mode 100644 docs/cqlengine/development.rst delete mode 100644 docs/cqlengine/external_resources.rst delete mode 100644 docs/cqlengine/manage_schemas.rst delete mode 100644 docs/cqlengine/related_projects.rst diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index d2af8caeab..71c6eddd25 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -30,18 +30,14 @@ def setup( retry_connect=False, **kwargs): """ 
- Records the hosts and connects to one of them - - :param hosts: list of hosts, see http://datastax.github.io/python-driver/api/cassandra/cluster.html - :type hosts: list - :param default_keyspace: The default keyspace to use - :type default_keyspace: str - :param consistency: The global consistency level - :type consistency: int - :param lazy_connect: True if should not connect until first use - :type lazy_connect: bool - :param retry_connect: bool - :param retry_connect: True if we should retry to connect even if there was a connection failure initially + Setup a the driver connection used by the mapper + + :param list hosts: list of hosts, see http://datastax.github.io/python-driver/api/cassandra/cluster.html + :param str default_keyspace: The default keyspace to use + :param int consistency: The global default :class:`~.ConsistencyLevel` + :param bool lazy_connect: True if should not connect until first use + :param bool retry_connect: True if we should retry to connect even if there was a connection failure initially + :param \*\*kwargs: Pass-through keyword arguments for :class:`cassandra.cluster.Cluster` """ global cluster, session, default_consistency_level, lazy_connect_args diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 58f4a871bb..f9c963a0d5 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -19,13 +19,21 @@ def create_keyspace(name, strategy_class, replication_factor, durable_writes=True, **replication_values): """ - creates a keyspace + *Deprecated - this will likely be repaced with something specialized per replication strategy.* + Creates a keyspace - :param name: name of keyspace to create - :param strategy_class: keyspace replication strategy class - :param replication_factor: keyspace replication factor - :param durable_writes: 1.2 only, write log is bypassed if set to False - :param **replication_values: 1.2 only, additional values to ad to the replication data map 
+ If the keyspace already exists, it will not be modified. + + **This function should be used with caution, especially in production environments. + Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).** + + *There are plans to guard schema-modifying functions with an environment-driven conditional.* + + :param str name: name of keyspace to create + :param str strategy_class: keyspace replication strategy class (:attr:`~.SimpleStrategy` or :attr:`~.NetworkTopologyStrategy` + :param int replication_factor: keyspace replication factor, used with :attr:`~.SimpleStrategy` + :param bool durable_writes: Write log is bypassed if set to False + :param \*\*replication_values: Additional values to ad to the replication options map """ cluster = get_cluster() @@ -54,19 +62,31 @@ def create_keyspace(name, strategy_class, replication_factor, durable_writes=Tru def delete_keyspace(name): + """ + *There are plans to guard schema-modifying functions with an environment-driven conditional.* + + **This function should be used with caution, especially in production environments. + Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).** + + Drops a keyspace, if it exists. + + :param str name: name of keyspace to delete + """ cluster = get_cluster() if name in cluster.metadata.keyspaces: execute("DROP KEYSPACE {}".format(name)) -def create_table(model): - raise CQLEngineException("create_table is deprecated, please use sync_table") - def sync_table(model): """ Inspects the model and creates / updates the corresponding table and columns. Note that the attributes removed from the model are not deleted on the database. They become effectively ignored by (will not show up on) the model. + + **This function should be used with caution, especially in production environments. + Take care to execute schema modifications in a single context (i.e. 
not concurrently with other clients).** + + *There are plans to guard schema-modifying functions with an environment-driven conditional.* """ if not issubclass(model, Model): @@ -298,11 +318,15 @@ def update_compaction(model): return False -def delete_table(model): - raise CQLEngineException("delete_table has been deprecated in favor of drop_table()") +def drop_table(model): + """ + Drops the table indicated by the model, if it exists. + **This function should be used with caution, especially in production environments. + Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).** -def drop_table(model): + *There are plans to guard schema-modifying functions with an environment-driven conditional.* + """ # don't try to delete non existant tables meta = get_cluster().metadata diff --git a/docs/api/cassandra/cqlengine/connection.rst b/docs/api/cassandra/cqlengine/connection.rst new file mode 100644 index 0000000000..960998c58e --- /dev/null +++ b/docs/api/cassandra/cqlengine/connection.rst @@ -0,0 +1,7 @@ +``cassandra.cqlengine.connection`` - Connection management for cqlengine +======================================================================== + +.. module:: cassandra.cqlengine.connection + +.. autofunction:: setup + diff --git a/docs/api/cassandra/cqlengine/management.rst b/docs/api/cassandra/cqlengine/management.rst new file mode 100644 index 0000000000..d1a82d7d09 --- /dev/null +++ b/docs/api/cassandra/cqlengine/management.rst @@ -0,0 +1,15 @@ +``cassandra.cqlengine.management`` - Schema management for cqlengine +======================================================================== + +.. module:: cassandra.cqlengine.management + +A collection of functions for managing keyspace and table schema. + +.. autofunction:: create_keyspace + +.. autofunction:: delete_keyspace + +.. autofunction:: sync_table + +.. 
autofunction:: drop_table + diff --git a/docs/api/cassandra/cqlengine/models.rst b/docs/api/cassandra/cqlengine/models.rst index 585b8d113f..6dd255f98a 100644 --- a/docs/api/cassandra/cqlengine/models.rst +++ b/docs/api/cassandra/cqlengine/models.rst @@ -10,6 +10,7 @@ Model The initializer creates an instance of the model. Pass in keyword arguments for columns you've defined on the model. .. code-block:: python + class Person(Model): id = columns.UUID(primary_key=True) first_name = columns.Text() diff --git a/docs/api/index.rst b/docs/api/index.rst index 1ee811db4f..7fffe71259 100644 --- a/docs/api/index.rst +++ b/docs/api/index.rst @@ -25,6 +25,8 @@ Core Driver cassandra/io/geventreactor cassandra/io/twistedreactor +.. _om_api: + Object Mapper ------------- .. toctree:: @@ -33,3 +35,5 @@ Object Mapper cassandra/cqlengine/models cassandra/cqlengine/columns cassandra/cqlengine/query + cassandra/cqlengine/connection + cassandra/cqlengine/management diff --git a/docs/conf.py b/docs/conf.py index 939a537dbd..988c27ca59 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -125,7 +125,7 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. diff --git a/docs/cqlengine/columns.rst b/docs/cqlengine/columns.rst deleted file mode 100644 index ac4b19a370..0000000000 --- a/docs/cqlengine/columns.rst +++ /dev/null @@ -1,206 +0,0 @@ -======= -Columns -======= - -.. module:: cqlengine.columns - -.. class:: Bytes() - - Stores arbitrary bytes (no validation), expressed as hexadecimal :: - - columns.Bytes() - - -.. class:: Ascii() - - Stores a US-ASCII character string :: - - columns.Ascii() - - -.. 
class:: Text() - - Stores a UTF-8 encoded string :: - - columns.Text() - - **options** - - :attr:`~columns.Text.min_length` - Sets the minimum length of this string. If this field is not set , and the column is not a required field, it defaults to 0, otherwise 1. - - :attr:`~columns.Text.max_length` - Sets the maximum length of this string. Defaults to None - -.. class:: Integer() - - Stores a 32-bit signed integer value :: - - columns.Integer() - -.. class:: BigInt() - - Stores a 64-bit signed long value :: - - columns.BigInt() - -.. class:: VarInt() - - Stores an arbitrary-precision integer :: - - columns.VarInt() - -.. class:: DateTime() - - Stores a datetime value. - - columns.DateTime() - -.. class:: UUID() - - Stores a type 1 or type 4 UUID. - - columns.UUID() - -.. class:: TimeUUID() - - Stores a UUID value as the cql type 'timeuuid' :: - - columns.TimeUUID() - - .. classmethod:: from_datetime(dt) - - generates a TimeUUID for the given datetime - - :param dt: the datetime to create a time uuid from - :type dt: datetime.datetime - - :returns: a time uuid created from the given datetime - :rtype: uuid1 - -.. class:: Boolean() - - Stores a boolean True or False value :: - - columns.Boolean() - -.. class:: Float() - - Stores a floating point value :: - - columns.Float() - - **options** - - :attr:`~columns.Float.double_precision` - If True, stores a double precision float value, otherwise single precision. Defaults to True. - -.. class:: Decimal() - - Stores a variable precision decimal value :: - - columns.Decimal() - -.. class:: Counter() - - Counters can be incremented and decremented :: - - columns.Counter() - - -Collection Type Columns ----------------------------- - - CQLEngine also supports container column types. Each container column requires a column class argument to specify what type of objects it will hold. The Map column requires 2, one for the key, and the other for the value - - *Example* - - .. 
code-block:: python - - class Person(Model): - id = columns.UUID(primary_key=True, default=uuid.uuid4) - first_name = columns.Text() - last_name = columns.Text() - - friends = columns.Set(columns.Text) - enemies = columns.Set(columns.Text) - todo_list = columns.List(columns.Text) - birthdays = columns.Map(columns.Text, columns.DateTime) - - - -.. class:: Set() - - Stores a set of unordered, unique values. Available only with Cassandra 1.2 and above :: - - columns.Set(value_type) - - **options** - - :attr:`~columns.Set.value_type` - The type of objects the set will contain - - :attr:`~columns.Set.strict` - If True, adding this column will raise an exception during save if the value is not a python `set` instance. If False, it will attempt to coerce the value to a set. Defaults to True. - -.. class:: List() - - Stores a list of ordered values. Available only with Cassandra 1.2 and above :: - - columns.List(value_type) - - **options** - - :attr:`~columns.List.value_type` - The type of objects the set will contain - -.. class:: Map() - - Stores a map (dictionary) collection, available only with Cassandra 1.2 and above :: - - columns.Map(key_type, value_type) - - **options** - - :attr:`~columns.Map.key_type` - The type of the map keys - - :attr:`~columns.Map.value_type` - The type of the map values - -Column Options -============== - - Each column can be defined with optional arguments to modify the way they behave. While some column types may define additional column options, these are the options that are available on all columns: - - .. attribute:: BaseColumn.primary_key - - If True, this column is created as a primary key field. A model can have multiple primary keys. Defaults to False. - - *In CQL, there are 2 types of primary keys: partition keys and clustering keys. As with CQL, the first primary key is the partition key, and all others are clustering keys, unless partition keys are specified manually using* :attr:`BaseColumn.partition_key` - - .. 
attribute:: BaseColumn.partition_key - - If True, this column is created as partition primary key. There may be many partition keys defined, forming a *composite partition key* - - .. attribute:: BaseColumn.index - - If True, an index will be created for this column. Defaults to False. - - *Note: Indexes can only be created on models with one primary key* - - .. attribute:: BaseColumn.db_field - - Explicitly sets the name of the column in the database table. If this is left blank, the column name will be the same as the name of the column attribute. Defaults to None. - - .. attribute:: BaseColumn.default - - The default value for this column. If a model instance is saved without a value for this column having been defined, the default value will be used. This can be either a value or a callable object (ie: datetime.now is a valid default argument). - - .. attribute:: BaseColumn.required - - If True, this model cannot be saved without a value defined for this column. Defaults to False. Primary key fields cannot have their required fields set to False. - - .. attribute:: BaseColumn.clustering_order - - Defines CLUSTERING ORDER for this column (valid choices are "asc" (default) or "desc"). It may be specified only for clustering primary keys - more: http://www.datastax.com/docs/1.2/cql_cli/cql/CREATE_TABLE#using-clustering-order diff --git a/docs/cqlengine/connection.rst b/docs/cqlengine/connection.rst deleted file mode 100644 index 40a685b688..0000000000 --- a/docs/cqlengine/connection.rst +++ /dev/null @@ -1,27 +0,0 @@ -========== -Connection -========== - -.. module:: cqlengine.connection - -The setup function in `cqlengine.connection` records the Cassandra servers to connect to. -If there is a problem with one of the servers, cqlengine will try to connect to each of the other connections before failing. - -.. 
function:: setup(hosts) - - :param hosts: list of hosts, strings in the :, or just - :type hosts: list - - :param default_keyspace: keyspace to default to - :type default_keyspace: str - - :param consistency: the consistency level of the connection, defaults to 'ONE' - :type consistency: int - - # see http://datastax.github.io/python-driver/api/cassandra.html#cassandra.ConsistencyLevel - - Records the hosts and connects to one of them - -See the example at :ref:`getting-started` - - diff --git a/docs/cqlengine/development.rst b/docs/cqlengine/development.rst deleted file mode 100644 index b3a4112507..0000000000 --- a/docs/cqlengine/development.rst +++ /dev/null @@ -1,47 +0,0 @@ -================== -Development -================== - -Travis CI -================ - -Tests are run using Travis CI using a Matrix to test different Cassandra and Python versions. It is located here: https://travis-ci.org/cqlengine/cqlengine - -Python versions: - -- 2.7 -- 3.4 - -Cassandra vesions: - -- 1.2 (protocol_version 1) -- 2.0 (protocol_version 2) -- 2.1 (upcoming, protocol_version 3) - -Pull Requests -=============== -Only Pull Requests that have passed the entire matrix will be considered for merge into the main codebase. - -Please see the contributing guidelines: https://github.com/cqlengine/cqlengine/blob/master/CONTRIBUTING.md - - -Testing Locally -================= - -Before testing, you'll need to set an environment variable to the version of Cassandra that's being tested. The version cooresponds to the release, so for example if you're testing against Cassandra 2.1, you'd set the following: - -.. code-block:: bash - - export CASSANDRA_VERSION=20 - -At the command line, execute: - -.. code-block:: bash - - bin/test.py - -This is a wrapper for nose that also sets up the database connection. 
- - - - diff --git a/docs/cqlengine/external_resources.rst b/docs/cqlengine/external_resources.rst deleted file mode 100644 index 58da23d9c3..0000000000 --- a/docs/cqlengine/external_resources.rst +++ /dev/null @@ -1,22 +0,0 @@ -============================ -External Resources -============================ - -Video Tutorials -================ - -Introduction to CQLEngine: https://www.youtube.com/watch?v=zrbQcPNMbB0 - -TimeUUID and Table Polymorphism: https://www.youtube.com/watch?v=clXN9pnakvI - - -Blog Posts -=========== - -Blake Eggleston on Table Polymorphism in the .8 release: http://blakeeggleston.com/cqlengine-08-released.html - - - - - - diff --git a/docs/cqlengine/faq.rst b/docs/cqlengine/faq.rst index 9c3c7beb3f..17b6ed9bab 100644 --- a/docs/cqlengine/faq.rst +++ b/docs/cqlengine/faq.rst @@ -3,7 +3,7 @@ Frequently Asked Questions ========================== Q: Why don't updates work correctly on models instantiated as Model(field=blah, field2=blah2)? -------------------------------------------------------------------- +---------------------------------------------------------------------------------------------- A: The recommended way to create new rows is with the models .create method. The values passed into a model's init method are interpreted by the model as the values as they were read from a row. This allows the model to "know" which rows have changed since the row was read out of cassandra, and create suitable update statements. diff --git a/docs/cqlengine/manage_schemas.rst b/docs/cqlengine/manage_schemas.rst deleted file mode 100644 index 5cedf8d75b..0000000000 --- a/docs/cqlengine/manage_schemas.rst +++ /dev/null @@ -1,47 +0,0 @@ -================ -Managing Schemas -================ - -**Users of versions < 0.4, please read this post before upgrading:** `Breaking Changes`_ - -.. _Breaking Changes: https://groups.google.com/forum/?fromgroups#!topic/cqlengine-users/erkSNe1JwuU - -.. module:: cqlengine.connection - -.. 
module:: cqlengine.management - -Once a connection has been made to Cassandra, you can use the functions in ``cqlengine.management`` to create and delete keyspaces, as well as create and delete tables for defined models - -.. function:: create_keyspace(name) - - :param name: the keyspace name to create - :type name: string - - creates a keyspace with the given name - -.. function:: delete_keyspace(name) - - :param name: the keyspace name to delete - :type name: string - - deletes the keyspace with the given name - -.. function:: sync_table(model) - - :param model: the :class:`~cqlengine.model.Model` class to make a table with - :type model: :class:`~cqlengine.model.Model` - - syncs a python model to cassandra (creates & alters) - -.. function:: drop_table(model) - - :param model: the :class:`~cqlengine.model.Model` class to delete a column family for - :type model: :class:`~cqlengine.model.Model` - - deletes the CQL table for the given model - - - -See the example at :ref:`getting-started` - - diff --git a/docs/cqlengine/models.rst b/docs/cqlengine/models.rst index aafd31d146..8ee30c365d 100644 --- a/docs/cqlengine/models.rst +++ b/docs/cqlengine/models.rst @@ -20,8 +20,8 @@ This example defines a ``Person`` table, with the columns ``first_name`` and ``l .. code-block:: python - from cassandra.cqlengine import columns - from cassandra.cqlengine.models import Model + from cassandra.cqlengine import columns + from cassandra.cqlengine.models import Model class Person(Model): id = columns.UUID(primary_key=True) @@ -72,7 +72,7 @@ To sync the models to the database, you may do the following*: sync_table(Comment) \*Note: synchronizing models causes schema changes, and should be done with caution. -Please see the discussion in :doc:`manage_schemas` for considerations. +Please see the discussion in :doc:`/api/cassandra/cqlengine/management` for considerations. 
For examples on manipulating data and creating queries, see :doc:`queryset` diff --git a/docs/cqlengine/queryset.rst b/docs/cqlengine/queryset.rst index b88c908aac..23c9226715 100644 --- a/docs/cqlengine/queryset.rst +++ b/docs/cqlengine/queryset.rst @@ -222,7 +222,7 @@ Token Function next_page = list(query.filter(pk__token__gt=cqlengine.Token(last.pk))) QuerySets are immutable -====================== +======================= When calling any method that changes a queryset, the method does not actually change the queryset object it's called on, but returns a new queryset object with the attributes of the original queryset, plus the attributes added in the method call. diff --git a/docs/cqlengine/related_projects.rst b/docs/cqlengine/related_projects.rst deleted file mode 100644 index eff06b3442..0000000000 --- a/docs/cqlengine/related_projects.rst +++ /dev/null @@ -1,22 +0,0 @@ -================================== -Related Projects -================================== - -Cassandra Native Driver -========================= - -- Docs: http://datastax.github.io/python-driver/api/index.html -- Github: https://github.com/datastax/python-driver -- Pypi: https://pypi.python.org/pypi/cassandra-driver/2.1.0 - - -Sphinx Contrib Module -========================= - -- Github https://github.com/dokai/sphinxcontrib-cqlengine - - -Django Integration -========================= - -- Github https://cqlengine.readthedocs.org diff --git a/docs/object_mapper.rst b/docs/object_mapper.rst index 3695c5e6db..2e982a5012 100644 --- a/docs/object_mapper.rst +++ b/docs/object_mapper.rst @@ -3,31 +3,24 @@ Object Mapper cqlengine is the Cassandra CQL 3 Object Mapper packaged with this driver -:ref:`getting-started` +:ref:`Jump to Getting Started ` Contents -------- :doc:`cqlengine/models` - Mapping objects to tables + Examples defining models, and mapping them to tables :doc:`cqlengine/queryset` - Query sets, filtering + Overview of query sets and filtering :doc:`cqlengine/batches` - Batch 
mutations + Working with batch mutations -:doc:`cqlengine/connection` - Managing connections - -:doc:`cqlengine/manage_schemas` - -:doc:`cqlengine/external_resources` - -:doc:`cqlengine/related_projects` +:ref:`API Documentation ` + Index of API documentation :doc:`cqlengine/third_party` - -:doc:`cqlengine/development` + High-level examples in Celery and uWSGI :doc:`cqlengine/faq` @@ -37,6 +30,8 @@ Contents cqlengine/models cqlengine/queryset cqlengine/batches + cqlengine/third_party + cqlengine/faq .. _getting-started: From a9a0dd5e7cdaef707e99e8381d8c3b7bdeb4caa4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 9 Feb 2015 14:37:52 -0600 Subject: [PATCH 1209/3726] Add note about different version support in cqlengine --- docs/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.rst b/docs/index.rst index 48ee59cdce..e28b00685b 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,7 +4,7 @@ A Python client driver for `Apache Cassandra `_. This driver works exclusively with the Cassandra Query Language v3 (CQL3) and Cassandra's native protocol. Cassandra 1.2+ is supported. -The driver supports Python 2.6, 2.7, 3.3, and 3.4. +The core driver supports Python 2.6, 2.7, 3.3, and 3.4. The object mapper is presently supported in 2.7+. This driver is open source under the `Apache v2 License `_. 
From 9b508b610656f79ba981ce3cd37a70f06642cf9c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 9 Feb 2015 14:38:24 -0600 Subject: [PATCH 1210/3726] Add cqlengine package and test requirements to setup script --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index b408edf4e0..dba353fbf2 100644 --- a/setup.py +++ b/setup.py @@ -205,11 +205,11 @@ def run_setup(extensions): url='http://github.com/datastax/python-driver', author='Tyler Hobbs', author_email='tyler@datastax.com', - packages=['cassandra', 'cassandra.io'], # TODO: add cqlengine after moving + packages=['cassandra', 'cassandra.io', 'cassandra.cqlengine'], keywords='cassandra,cql,orm', include_package_data=True, install_requires=dependencies, - tests_require=['nose', 'mock', 'PyYAML', 'pytz'], + tests_require=['nose', 'mock', 'PyYAML', 'pytz', 'sure'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', From cf29b3972900d4a2149f751261e85a7802f57ddb Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 9 Feb 2015 16:00:08 -0600 Subject: [PATCH 1211/3726] Make unit tests work with latest version of pypy --- tests/unit/test_policies.py | 7 ++++--- tests/unit/test_types.py | 3 ++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index bd8426cd50..ba389c7662 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -140,20 +140,21 @@ def host_down(): # make the GIL switch after every instruction, maximizing # the chace of race conditions - if six.PY2: + check = six.PY2 or '__pypy__' in sys.builtin_module_names + if check: original_interval = sys.getcheckinterval() else: original_interval = sys.getswitchinterval() try: - if six.PY2: + if check: sys.setcheckinterval(0) else: sys.setswitchinterval(0.0001) map(lambda t: t.start(), threads) map(lambda t: t.join(), threads) finally: - if six.PY2: + if check: 
sys.setcheckinterval(original_interval) else: sys.setswitchinterval(original_interval) diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index dc43eda1f9..4588b85683 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -20,6 +20,7 @@ from binascii import unhexlify import datetime +import six import time import cassandra from cassandra.cqltypes import (BooleanType, lookup_casstype_simple, lookup_casstype, @@ -146,7 +147,7 @@ def __init__(self, subtypes, names): @classmethod def apply_parameters(cls, subtypes, names): - return cls(subtypes, [unhexlify(name) if name is not None else name for name in names]) + return cls(subtypes, [unhexlify(six.b(name)) if name is not None else name for name in names]) class BarType(FooType): typename = 'org.apache.cassandra.db.marshal.BarType' From b3243f78b5bf14095db6a7f1f48134a23c579fc2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 9 Feb 2015 17:20:35 -0600 Subject: [PATCH 1212/3726] Integrate cqlengine tests into tox.ini --- tests/integration/__init__.py | 18 ++++++---- tests/integration/long/__init__.py | 5 ++- tests/integration/standard/__init__.py | 5 ++- tox.ini | 47 ++++++-------------------- 4 files changed, 30 insertions(+), 45 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 0658a29dbb..9ca0266c8a 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -35,7 +35,7 @@ from ccmlib.cluster_factory import ClusterFactory as CCMClusterFactory from ccmlib import common except ImportError as e: - raise unittest.SkipTest('ccm is a dependency for integration tests:', e) + CCMClusterFactory = None CLUSTER_NAME = 'test_cluster' SINGLE_NODE_CLUSTER_NAME = 'single_node' @@ -81,11 +81,12 @@ def _tuple_version(version_string): return tuple([int(p) for p in version_string.split('.')]) -USE_CCM_CASS_EXTERNAL = bool(os.getenv('USE_CCM_CASS_EXTERNAL', False)) +USE_CASS_EXTERNAL = bool(os.getenv('USE_CASS_EXTERNAL', False)) 
default_cassandra_version = '2.1.2' -if USE_CCM_CASS_EXTERNAL: +if USE_CASS_EXTERNAL and CCMClusterFactory: + # see if the external instance is running in ccm path = common.get_default_path() name = common.current_cluster_name(path) CCM_CLUSTER = CCMClusterFactory.load(common.get_default_path(), name) @@ -136,7 +137,7 @@ def use_single_node(start=True): def remove_cluster(): - if USE_CCM_CASS_EXTERNAL: + if USE_CASS_EXTERNAL: return global CCM_CLUSTER @@ -157,8 +158,11 @@ def is_current_cluster(cluster_name, node_counts): def use_cluster(cluster_name, nodes, ipformat=None, start=True): global CCM_CLUSTER - if USE_CCM_CASS_EXTERNAL: - log.debug("Using external cluster %s", CCM_CLUSTER.name) + if USE_CASS_EXTERNAL: + if CCM_CLUSTER: + log.debug("Using external ccm cluster %s", CCM_CLUSTER.name) + else: + log.debug("Using unnamed external cluster") return if is_current_cluster(cluster_name, nodes): @@ -194,7 +198,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True): def teardown_package(): - if USE_CCM_CASS_EXTERNAL: + if USE_CASS_EXTERNAL: return # when multiple modules are run explicitly, this runs between them # need to make sure CCM_CLUSTER is properly cleared for that case diff --git a/tests/integration/long/__init__.py b/tests/integration/long/__init__.py index 6df2a7312b..76954be148 100644 --- a/tests/integration/long/__init__.py +++ b/tests/integration/long/__init__.py @@ -11,4 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- +try: + from ccmlib import common +except ImportError as e: + raise unittest.SkipTest('ccm is a dependency for integration tests:', e) diff --git a/tests/integration/standard/__init__.py b/tests/integration/standard/__init__.py index 6df2a7312b..76954be148 100644 --- a/tests/integration/standard/__init__.py +++ b/tests/integration/standard/__init__.py @@ -11,4 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +try: + from ccmlib import common +except ImportError as e: + raise unittest.SkipTest('ccm is a dependency for integration tests:', e) diff --git a/tox.ini b/tox.ini index 9f99fec835..6c4aa31a27 100644 --- a/tox.ini +++ b/tox.ini @@ -1,55 +1,30 @@ [tox] envlist = py26,py27,pypy,py33,py34 -[testenv] +[base] deps = nose mock - ccm unittest2 - pip PyYAML six - blist -commands = {envpython} setup.py build_ext --inplace - nosetests --verbosity=2 tests/unit/ -[testenv:py33] -deps = nose - mock - pip - PyYAML - six - scales +[testenv] +deps = {[base]deps} blist + sure +setenv = USE_CASS_EXTERNAL=1 commands = {envpython} setup.py build_ext --inplace nosetests --verbosity=2 tests/unit/ + nosetests --verbosity=2 tests/integration/cqlengine -[testenv:py34] -deps = nose - mock - pip - PyYAML - six - scales - blist +[testenv:py26] commands = {envpython} setup.py build_ext --inplace nosetests --verbosity=2 tests/unit/ +# no cqlengine support for 2.6 right now [testenv:pypy] -deps = nose - mock - ccm - unittest2 - pip - PyYAML - scales - six +deps = {[base]deps} commands = {envpython} setup.py build_ext --inplace nosetests --verbosity=2 tests/unit/ - -# TODO: integrate cqlengine tests -#envlist=py27,py34 -# -#[testenv] -#deps= -rrequirements.txt -#commands=bin/test.py --no-skip +# cqlengine/test_timestamp.py uses sure, which fails in pypy presently +# could remove sure usage From 023bd1075f4645584ecda9610fbad523163d4d4e Mon Sep 17 
00:00:00 2001 From: Adam Holmberg Date: Tue, 10 Feb 2015 10:18:20 -0600 Subject: [PATCH 1213/3726] Update cql collection links to more recent docs --- cassandra/cqlengine/columns.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 1f13e3d724..d27a93863e 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -657,7 +657,7 @@ class Set(BaseContainerColumn): """ Stores a set of unordered, unique values - http://www.datastax.com/docs/1.2/cql_cli/using/collections + http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_set_t.html """ db_type = 'set<{}>' @@ -709,7 +709,7 @@ class List(BaseContainerColumn): """ Stores a list of ordered values - http://www.datastax.com/docs/1.2/cql_cli/using/collections_list + http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_list_t.html """ db_type = 'list<{}>' @@ -748,7 +748,7 @@ class Map(BaseContainerColumn): """ Stores a key -> value map (dictionary) - http://www.datastax.com/docs/1.2/cql_cli/using/collections_map + http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_map_t.html """ db_type = 'map<{}, {}>' From f078e23935947050d0070d2db15c8382c72364c8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 10 Feb 2015 10:23:46 -0600 Subject: [PATCH 1214/3726] Make Cluster link internal reference also tweak FAQ --- cassandra/cqlengine/connection.py | 2 +- docs/cqlengine/faq.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 71c6eddd25..c87fc49524 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -32,7 +32,7 @@ def setup( """ Setup a the driver connection used by the mapper - :param list hosts: list of hosts, see http://datastax.github.io/python-driver/api/cassandra/cluster.html + :param list hosts: list of hosts, (``contact_points`` for 
:class:`cassandra.cluster.Cluster`) :param str default_keyspace: The default keyspace to use :param int consistency: The global default :class:`~.ConsistencyLevel` :param bool lazy_connect: True if should not connect until first use diff --git a/docs/cqlengine/faq.rst b/docs/cqlengine/faq.rst index 17b6ed9bab..c9af1d106c 100644 --- a/docs/cqlengine/faq.rst +++ b/docs/cqlengine/faq.rst @@ -2,7 +2,7 @@ Frequently Asked Questions ========================== -Q: Why don't updates work correctly on models instantiated as Model(field=blah, field2=blah2)? +Q: Why don't updates work correctly on models instantiated as Model(field=value, field2=value2)? ---------------------------------------------------------------------------------------------- A: The recommended way to create new rows is with the models .create method. The values passed into a model's init method are interpreted by the model as the values as they were read from a row. This allows the model to "know" which rows have changed since the row was read out of cassandra, and create suitable update statements. 
From 62c25d60e9937bc1c9faf2d1e6445d029543db5d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 10 Feb 2015 11:47:22 -0600 Subject: [PATCH 1215/3726] PEP-8 cleanup --- cassandra/cqlengine/columns.py | 89 ++++++++++++++---------- cassandra/cqlengine/connection.py | 14 ++-- cassandra/cqlengine/exceptions.py | 30 +++++--- cassandra/cqlengine/functions.py | 5 +- cassandra/cqlengine/management.py | 53 +++++++------- cassandra/cqlengine/models.py | 112 ++++++++++++++++-------------- cassandra/cqlengine/named.py | 9 ++- cassandra/cqlengine/operators.py | 6 +- cassandra/cqlengine/query.py | 109 ++++++++++++++++------------- cassandra/cqlengine/statements.py | 58 +++++++++------- 10 files changed, 283 insertions(+), 202 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index d27a93863e..2181413b3d 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -1,4 +1,3 @@ -#column field types from copy import deepcopy, copy from datetime import datetime from datetime import date @@ -86,7 +85,7 @@ def __eq__(self, other): class Column(object): - #the cassandra type this column maps to + # the cassandra type this column maps to db_type = None value_manager = BaseValueManager @@ -161,13 +160,13 @@ def __init__(self, self.required = required self.clustering_order = clustering_order self.polymorphic_key = polymorphic_key - #the column name in the model definition + # the column name in the model definition self.column_name = None self.static = static self.value = None - #keep track of instantiation order + # keep track of instantiation order self.position = Column.instance_counter Column.instance_counter += 1 @@ -266,17 +265,18 @@ def to_database(self, value): return bytearray(val) def to_python(self, value): - #return value[2:].decode('hex') return value Bytes = Blob + class Ascii(Column): """ Stores a US-ASCII character string """ db_type = 'ascii' + class Inet(Column): """ Stores an IP address in IPv4 or IPv6 
format @@ -309,7 +309,8 @@ def __init__(self, *args, **kwargs): def validate(self, value): value = super(Text, self).validate(value) - if value is None: return + if value is None: + return if not isinstance(value, (six.string_types, bytearray)) and value is not None: raise ValidationError('{} {} is not a string'.format(self.column_name, type(value))) if self.max_length: @@ -330,7 +331,8 @@ class Integer(Column): def validate(self, value): val = super(Integer, self).validate(value) - if val is None: return + if val is None: + return try: return int(val) except (TypeError, ValueError): @@ -409,7 +411,8 @@ class DateTime(Column): db_type = 'timestamp' def to_python(self, value): - if value is None: return + if value is None: + return if isinstance(value, datetime): return value elif isinstance(value, date): @@ -421,7 +424,8 @@ def to_python(self, value): def to_database(self, value): value = super(DateTime, self).to_database(value) - if value is None: return + if value is None: + return if not isinstance(value, datetime): if isinstance(value, date): value = datetime(value.year, value.month, value.day) @@ -443,7 +447,8 @@ class Date(Column): db_type = 'timestamp' def to_python(self, value): - if value is None: return + if value is None: + return if isinstance(value, datetime): return value.date() elif isinstance(value, date): @@ -455,7 +460,8 @@ def to_python(self, value): def to_database(self, value): value = super(Date, self).to_database(value) - if value is None: return + if value is None: + return if isinstance(value, datetime): value = value.date() if not isinstance(value, date): @@ -474,9 +480,11 @@ class UUID(Column): def validate(self, value): val = super(UUID, self).validate(value) - if val is None: return + if val is None: + return from uuid import UUID as _UUID - if isinstance(val, _UUID): return val + if isinstance(val, _UUID): + return val if isinstance(val, six.string_types) and self.re_uuid.match(val): return _UUID(val) raise ValidationError("{} {} is 
not a valid uuid".format(self.column_name, value)) @@ -510,7 +518,7 @@ def from_datetime(self, dt): epoch = datetime(1970, 1, 1, tzinfo=dt.tzinfo) offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 - timestamp = (dt - epoch).total_seconds() - offset + timestamp = (dt - epoch).total_seconds() - offset node = None clock_seq = None @@ -529,7 +537,7 @@ def from_datetime(self, dt): if node is None: node = getnode() return pyUUID(fields=(time_low, time_mid, time_hi_version, - clock_seq_hi_variant, clock_seq_low, node), version=1) + clock_seq_hi_variant, clock_seq_low, node), version=1) class Boolean(Column): @@ -563,7 +571,8 @@ def __init__(self, double_precision=True, **kwargs): def validate(self, value): value = super(Float, self).validate(value) - if value is None: return + if value is None: + return try: return float(value) except (TypeError, ValueError): @@ -586,7 +595,8 @@ def validate(self, value): from decimal import Decimal as _Decimal from decimal import InvalidOperation val = super(Decimal, self).validate(value) - if val is None: return + if val is None: + return try: return _Decimal(val) except InvalidOperation: @@ -679,7 +689,8 @@ def __init__(self, value_type, strict=True, default=set, **kwargs): def validate(self, value): val = super(Set, self).validate(value) - if val is None: return + if val is None: + return types = (set,) if self.strict else (set, list, tuple) if not isinstance(val, types): if self.strict: @@ -693,18 +704,19 @@ def validate(self, value): return {self.value_col.validate(v) for v in val} def to_python(self, value): - if value is None: return set() + if value is None: + return set() return {self.value_col.to_python(v) for v in value} def to_database(self, value): - if value is None: return None + if value is None: + return None - if isinstance(value, self.Quoter): return value + if isinstance(value, self.Quoter): + return value return self.Quoter({self.value_col.to_database(v) for v in value}) - - class 
List(BaseContainerColumn): """ Stores a list of ordered values @@ -727,7 +739,8 @@ def __init__(self, value_type, default=list, **kwargs): def validate(self, value): val = super(List, self).validate(value) - if val is None: return + if val is None: + return if not isinstance(val, (set, list, tuple)): raise ValidationError('{} {} is not a list object'.format(self.column_name, val)) if None in val: @@ -735,12 +748,15 @@ def validate(self, value): return [self.value_col.validate(v) for v in val] def to_python(self, value): - if value is None: return [] + if value is None: + return [] return [self.value_col.to_python(v) for v in value] def to_database(self, value): - if value is None: return None - if isinstance(value, self.Quoter): return value + if value is None: + return None + if isinstance(value, self.Quoter): + return value return self.Quoter([self.value_col.to_database(v) for v in value]) @@ -757,7 +773,7 @@ class Quoter(BaseContainerQuoter): def __str__(self): cq = cql_quote - return '{' + ', '.join([cq(k) + ':' + cq(v) for k,v in self.value.items()]) + '}' + return '{' + ', '.join([cq(k) + ':' + cq(v) for k, v in self.value.items()]) + '}' def get(self, key): return self.value.get(key) @@ -802,22 +818,24 @@ def get_column_def(self): def validate(self, value): val = super(Map, self).validate(value) - if val is None: return + if val is None: + return if not isinstance(val, dict): raise ValidationError('{} {} is not a dict object'.format(self.column_name, val)) - return {self.key_col.validate(k):self.value_col.validate(v) for k,v in val.items()} + return {self.key_col.validate(k): self.value_col.validate(v) for k, v in val.items()} def to_python(self, value): if value is None: return {} if value is not None: - return {self.key_col.to_python(k): self.value_col.to_python(v) for k,v in value.items()} + return {self.key_col.to_python(k): self.value_col.to_python(v) for k, v in value.items()} def to_database(self, value): - if value is None: return None - if 
isinstance(value, self.Quoter): return value - return self.Quoter({self.key_col.to_database(k):self.value_col.to_database(v) for k,v in value.items()}) - + if value is None: + return None + if isinstance(value, self.Quoter): + return value + return self.Quoter({self.key_col.to_database(k): self.value_col.to_database(v) for k, v in value.items()}) class _PartitionKeysToken(Column): @@ -842,4 +860,3 @@ def to_database(self, value): def get_cql(self): return "token({})".format(", ".join(c.cql for c in self.partition_columns)) - diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index c87fc49524..d4b1dc96b5 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -1,6 +1,6 @@ from collections import namedtuple -import six import logging +import six from cassandra import ConsistencyLevel from cassandra.cluster import Cluster, _NOT_SET, NoHostAvailable @@ -9,11 +9,13 @@ from cassandra.cqlengine.statements import BaseCQLStatement -LOG = logging.getLogger('cqlengine.cql') +log = logging.getLogger(__name__) + NOT_SET = _NOT_SET # required for passing timeout to Session.execute -class CQLConnectionError(CQLEngineException): pass +class CQLConnectionError(CQLEngineException): + pass Host = namedtuple('Host', ['name', 'port']) @@ -22,6 +24,7 @@ class CQLConnectionError(CQLEngineException): pass lazy_connect_args = None default_consistency_level = None + def setup( hosts, default_keyspace, @@ -94,21 +97,24 @@ def execute(query, params=None, consistency_level=None, timeout=NOT_SET): elif isinstance(query, six.string_types): query = SimpleStatement(query, consistency_level=consistency_level) - LOG.info(query.query_string) + log.debug(query.query_string) params = params or {} result = session.execute(query, params, timeout=timeout) return result + def get_session(): handle_lazy_connect() return session + def get_cluster(): handle_lazy_connect() return cluster + def handle_lazy_connect(): global lazy_connect_args if 
lazy_connect_args: diff --git a/cassandra/cqlengine/exceptions.py b/cassandra/cqlengine/exceptions.py index 19b5c1fe37..8f60c90ac5 100644 --- a/cassandra/cqlengine/exceptions.py +++ b/cassandra/cqlengine/exceptions.py @@ -1,8 +1,22 @@ -#cqlengine exceptions -class CQLEngineException(Exception): pass -class ModelException(CQLEngineException): pass -class ValidationError(CQLEngineException): pass - -class UndefinedKeyspaceException(CQLEngineException): pass -class LWTException(CQLEngineException): pass -class IfNotExistsWithCounterColumn(CQLEngineException): pass +class CQLEngineException(Exception): + pass + + +class ModelException(CQLEngineException): + pass + + +class ValidationError(CQLEngineException): + pass + + +class UndefinedKeyspaceException(CQLEngineException): + pass + + +class LWTException(CQLEngineException): + pass + + +class IfNotExistsWithCounterColumn(CQLEngineException): + pass diff --git a/cassandra/cqlengine/functions.py b/cassandra/cqlengine/functions.py index 78d42479b6..c77032391f 100644 --- a/cassandra/cqlengine/functions.py +++ b/cassandra/cqlengine/functions.py @@ -5,12 +5,14 @@ from cassandra.cqlengine.exceptions import ValidationError # move to central spot + class UnicodeMixin(object): if sys.version_info > (3, 0): __str__ = lambda x: x.__unicode__() else: __str__ = lambda x: six.text_type(x).encode('utf-8') + class QueryValue(UnicodeMixin): """ Base class for query filter values. 
Subclasses of these classes can @@ -42,6 +44,8 @@ class BaseQueryFunction(QueryValue): be passed into .filter() and will be translated into CQL functions in the resulting query """ + pass + class MinTimeUUID(BaseQueryFunction): """ @@ -123,4 +127,3 @@ def __unicode__(self): def update_context(self, ctx): for i, (col, val) in enumerate(zip(self._columns, self.value)): ctx[str(self.context_id + i)] = col.to_database(val) - diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index f9c963a0d5..d5b3f8178e 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -12,11 +12,12 @@ Field = namedtuple('Field', ['name', 'type']) -logger = logging.getLogger(__name__) +log = logging.getLogger(__name__) # system keyspaces schema_columnfamilies = NamedTable('system', 'schema_columnfamilies') + def create_keyspace(name, strategy_class, replication_factor, durable_writes=True, **replication_values): """ *Deprecated - this will likely be repaced with something specialized per replication strategy.* @@ -38,10 +39,10 @@ def create_keyspace(name, strategy_class, replication_factor, durable_writes=Tru cluster = get_cluster() if name not in cluster.metadata.keyspaces: - #try the 1.2 method + # try the 1.2 method replication_map = { 'class': strategy_class, - 'replication_factor':replication_factor + 'replication_factor': replication_factor } replication_map.update(replication_values) if strategy_class.lower() != 'simplestrategy': @@ -76,6 +77,7 @@ def delete_keyspace(name): if name in cluster.metadata.keyspaces: execute("DROP KEYSPACE {}".format(name)) + def sync_table(model): """ Inspects the model and creates / updates the corresponding table and columns. 
@@ -95,8 +97,7 @@ def sync_table(model): if model.__abstract__: raise CQLEngineException("cannot create table from abstract model") - - #construct query string + # construct query string cf_name = model.column_family_name() raw_cf_name = model.column_family_name(include_keyspace=False) @@ -107,7 +108,7 @@ def sync_table(model): keyspace = cluster.metadata.keyspaces[ks_name] tables = keyspace.tables - #check for an existing column family + # check for an existing column family if raw_cf_name not in tables: qs = get_create_table(model) @@ -123,20 +124,21 @@ def sync_table(model): fields = get_fields(model) field_names = [x.name for x in fields] for name, col in model._columns.items(): - if col.primary_key or col.partition_key: continue # we can't mess with the PK - if col.db_field_name in field_names: continue # skip columns already defined + if col.primary_key or col.partition_key: + continue # we can't mess with the PK + if col.db_field_name in field_names: + continue # skip columns already defined # add missing column using the column def query = "ALTER TABLE {} add {}".format(cf_name, col.get_column_def()) - logger.debug(query) + log.debug(query) execute(query) update_compaction(model) - table = cluster.metadata.keyspaces[ks_name].tables[raw_cf_name] - indexes = [c for n,c in model._columns.items() if c.index] + indexes = [c for n, c in model._columns.items() if c.index] for column in indexes: if table.columns[column.db_field_name].index: @@ -148,20 +150,23 @@ def sync_table(model): qs = ' '.join(qs) execute(qs) + def get_create_table(model): cf_name = model.column_family_name() qs = ['CREATE TABLE {}'.format(cf_name)] - #add column types - pkeys = [] # primary keys - ckeys = [] # clustering keys - qtypes = [] # field types + # add column types + pkeys = [] # primary keys + ckeys = [] # clustering keys + qtypes = [] # field types + def add_column(col): s = col.get_column_def() if col.primary_key: keys = (pkeys if col.partition_key else ckeys) 
keys.append('"{}"'.format(col.db_field_name)) qtypes.append(s) + for name, col in model._columns.items(): add_column(col) @@ -172,9 +177,9 @@ def add_column(col): with_qs = [] table_properties = ['bloom_filter_fp_chance', 'caching', 'comment', - 'dclocal_read_repair_chance', 'default_time_to_live', 'gc_grace_seconds', - 'index_interval', 'memtable_flush_period_in_ms', 'populate_io_cache_on_flush', - 'read_repair_chance', 'replicate_on_write'] + 'dclocal_read_repair_chance', 'default_time_to_live', 'gc_grace_seconds', + 'index_interval', 'memtable_flush_period_in_ms', 'populate_io_cache_on_flush', + 'read_repair_chance', 'replicate_on_write'] for prop_name in table_properties: prop_value = getattr(model, '__{}__'.format(prop_name), None) if prop_value is not None: @@ -211,9 +216,9 @@ def get_compaction_options(model): if not model.__compaction__: return {} - result = {'class':model.__compaction__} + result = {'class': model.__compaction__} - def setter(key, limited_to_strategy = None): + def setter(key, limited_to_strategy=None): """ sets key in result, checking if the key is limited to either SizeTiered or Leveled :param key: one of the compaction options, like "bucket_high" @@ -270,6 +275,7 @@ def get_table_settings(model): table = cluster.metadata.keyspaces[ks].tables[table] return table + def update_compaction(model): """Updates the compaction options for the given model if necessary. @@ -279,7 +285,7 @@ def update_compaction(model): `False` otherwise. 
:rtype: bool """ - logger.debug("Checking %s for compaction differences", model) + log.debug("Checking %s for compaction differences", model) table = get_table_settings(model) existing_options = table.options.copy() @@ -311,7 +317,7 @@ def update_compaction(model): options = json.dumps(options).replace('"', "'") cf_name = model.column_family_name() query = "ALTER TABLE {} with compaction = {}".format(cf_name, options) - logger.debug(query) + log.debug(query) execute(query) return True @@ -339,6 +345,3 @@ def drop_table(model): execute('drop table {};'.format(model.column_family_name(include_keyspace=True))) except KeyError: pass - - - diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 298e9d930d..bbb4b96454 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -1,4 +1,5 @@ import re +import six from cassandra.cqlengine import columns from cassandra.cqlengine.exceptions import ModelException, CQLEngineException, ValidationError @@ -7,16 +8,20 @@ from cassandra.cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned from cassandra.util import OrderedDict -class ModelDefinitionException(ModelException): pass +class ModelDefinitionException(ModelException): + pass -class PolyMorphicModelException(ModelException): pass -DEFAULT_KEYSPACE = None +class PolyMorphicModelException(ModelException): + pass + class UndefinedKeyspaceWarning(Warning): pass +DEFAULT_KEYSPACE = None + class hybrid_classmethod(object): """ @@ -106,7 +111,7 @@ class TTLDescriptor(object): """ def __get__(self, instance, model): if instance: - #instance = copy.deepcopy(instance) + # instance = copy.deepcopy(instance) # instance method def ttl_setter(ts): instance._ttl = ts @@ -124,6 +129,7 @@ def ttl_setter(ts): def __call__(self, *args, **kwargs): raise NotImplementedError + class TimestampDescriptor(object): """ returns a query set descriptor with a timestamp specified @@ -138,10 +144,10 @@ def timestamp_setter(ts): return 
model.objects.timestamp - def __call__(self, *args, **kwargs): raise NotImplementedError + class IfNotExistsDescriptor(object): """ return a query set descriptor with a if_not_exists flag specified @@ -159,13 +165,14 @@ def ifnotexists_setter(ife): def __call__(self, *args, **kwargs): raise NotImplementedError + class ConsistencyDescriptor(object): """ returns a query set descriptor if called on Class, instance if it was an instance call """ def __get__(self, instance, model): if instance: - #instance = copy.deepcopy(instance) + # instance = copy.deepcopy(instance) def consistency_setter(consistency): instance.__consistency__ = consistency return instance @@ -229,7 +236,7 @@ def __get__(self, instance, owner): """ try: return instance._values[self.column.column_name].getval() - except AttributeError as e: + except AttributeError: return self.query_evaluator def __set__(self, instance, value): @@ -258,9 +265,11 @@ class BaseModel(object): The base model class, don't inherit from this, inherit from Model, defined below """ - class DoesNotExist(_DoesNotExist): pass + class DoesNotExist(_DoesNotExist): + pass - class MultipleObjectsReturned(_MultipleObjectsReturned): pass + class MultipleObjectsReturned(_MultipleObjectsReturned): + pass objects = QuerySetDescriptor() ttl = TTLDescriptor() @@ -269,7 +278,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass # custom timestamps, see USING TIMESTAMP X timestamp = TimestampDescriptor() - + if_not_exists = IfNotExistsDescriptor() # _len is lazily created by __len__ @@ -302,8 +311,7 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass __queryset__ = ModelQuerySet __dmlquery__ = DMLQuery - __consistency__ = None # can be set per query - + __consistency__ = None # can be set per query # Additional table properties __bloom_filter_fp_chance__ = None @@ -318,9 +326,9 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): pass __read_repair_chance__ = None __replicate_on_write__ = None - _timestamp = 
None # optional timestamp to include with the operation (USING TIMESTAMP) + _timestamp = None # optional timestamp to include with the operation (USING TIMESTAMP) - _if_not_exists = False # optional if_not_exists flag to check existence before insertion + _if_not_exists = False # optional if_not_exists flag to check existence before insertion def __init__(self, **values): self._values = {} @@ -343,21 +351,19 @@ def __init__(self, **values): self._batch = None self._timeout = NOT_SET - def __repr__(self): """ Pretty printing of models by their primary key """ return '{} <{}>'.format(self.__class__.__name__, - ', '.join(('{}={}'.format(k, getattr(self, k)) for k,v in six.iteritems(self._primary_keys))) + ', '.join(('{}={}'.format(k, getattr(self, k)) for k, v in six.iteritems(self._primary_keys))) ) - - @classmethod def _discover_polymorphic_submodels(cls): if not cls._is_polymorphic_base: raise ModelException('_discover_polymorphic_submodels can only be called on polymorphic base classes') + def _discover(klass): if not klass._is_polymorphic_base and klass.__polymorphic_key__ is not None: cls._polymorphic_map[klass.__polymorphic_key__] = klass @@ -381,7 +387,7 @@ def _construct_instance(cls, values): # and translate that into our local fields # the db_map is a db_field -> model field map items = values.items() - field_dict = dict([(cls._db_map.get(k, k),v) for k,v in items]) + field_dict = dict([(cls._db_map.get(k, k), v) for k, v in items]) if cls._is_polymorphic: poly_key = field_dict.get(cls._polymorphic_column_name) @@ -421,8 +427,9 @@ def _can_update(self): :return: """ - if not self._is_persisted: return False - pks = self._primary_keys.keys() + if not self._is_persisted: + return False + return all([not self._values[k].changed for k in self._primary_keys]) @classmethod @@ -481,11 +488,14 @@ def column_family_name(cls, include_keyspace=True): ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s) cf_name += 
ccase(cls.__name__) - #trim to less than 48 characters or cassandra will complain + # trim to less than 48 characters or cassandra will complain cf_name = cf_name[-48:] cf_name = cf_name.lower() cf_name = re.sub(r'^_+', '', cf_name) - if not include_keyspace: return cf_name + + if not include_keyspace: + return cf_name + return '{}.{}'.format(cls._get_keyspace(), cf_name) def validate(self): @@ -499,8 +509,7 @@ def validate(self): val = col.validate(v) setattr(self, name, val) - ### Let an instance be used like a dict of its columns keys/values - + # Let an instance be used like a dict of its columns keys/values def __iter__(self): """ Iterate over column ids. """ for column_id in self._columns.keys(): @@ -620,7 +629,6 @@ def save(self): else: setattr(self, self._polymorphic_column_name, self.__polymorphic_key__) - is_new = self.pk is None self.validate() self.__dmlquery__(self.__class__, self, batch=self._batch, @@ -631,7 +639,7 @@ def save(self): transaction=self._transaction, timeout=self._timeout).save() - #reset the value managers + # reset the value managers for v in self._values.values(): v.reset_previous_value() self._is_persisted = True @@ -680,7 +688,7 @@ def update(self, **values): transaction=self._transaction, timeout=self._timeout).update() - #reset the value managers + # reset the value managers for v in self._values.values(): v.reset_previous_value() self._is_persisted = True @@ -704,7 +712,7 @@ def get_changed_columns(self): """ Returns a list of the columns that have been updated since instantiation or save """ - return [k for k,v in self._values.items() if v.changed] + return [k for k, v in self._values.items() if v.changed] @classmethod def _class_batch(cls, batch): @@ -715,29 +723,28 @@ def _inst_batch(self, batch): self._batch = batch return self - batch = hybrid_classmethod(_class_batch, _inst_batch) class ModelMetaClass(type): def __new__(cls, name, bases, attrs): - #move column definitions into columns dict - #and set default column names + 
# move column definitions into columns dict + # and set default column names column_dict = OrderedDict() primary_keys = OrderedDict() pk_name = None - #get inherited properties + # get inherited properties inherited_columns = OrderedDict() for base in bases: - for k,v in getattr(base, '_defined_columns', {}).items(): - inherited_columns.setdefault(k,v) + for k, v in getattr(base, '_defined_columns', {}).items(): + inherited_columns.setdefault(k, v) - #short circuit __abstract__ inheritance + # short circuit __abstract__ inheritance is_abstract = attrs['__abstract__'] = attrs.get('__abstract__', False) - #short circuit __polymorphic_key__ inheritance + # short circuit __polymorphic_key__ inheritance attrs['__polymorphic_key__'] = attrs.get('__polymorphic_key__', None) def _transform_column(col_name, col_obj): @@ -745,11 +752,10 @@ def _transform_column(col_name, col_obj): if col_obj.primary_key: primary_keys[col_name] = col_obj col_obj.set_column_name(col_name) - #set properties + # set properties attrs[col_name] = ColumnDescriptor(col_obj) - column_definitions = [(k,v) for k,v in attrs.items() if isinstance(v, columns.Column)] - #column_definitions = sorted(column_definitions, lambda x,y: cmp(x[1].position, y[1].position)) + column_definitions = [(k, v) for k, v in attrs.items() if isinstance(v, columns.Column)] column_definitions = sorted(column_definitions, key=lambda x: x[1].position) is_polymorphic_base = any([c[1].polymorphic_key for c in column_definitions]) @@ -780,7 +786,7 @@ def _get_polymorphic_base(bases): defined_columns = OrderedDict(column_definitions) # check for primary key - if not is_abstract and not any([v.primary_key for k,v in column_definitions]): + if not is_abstract and not any([v.primary_key for k, v in column_definitions]): raise ModelDefinitionException("At least 1 primary key is required.") counter_columns = [c for c in defined_columns.values() if isinstance(c, columns.Counter)] @@ -790,7 +796,7 @@ def _get_polymorphic_base(bases): 
has_partition_keys = any(v.partition_key for (k, v) in column_definitions) - #transform column definitions + # transform column definitions for k, v in column_definitions: # don't allow a column with the same name as a built-in attribute or method if k in BaseModel.__dict__: @@ -810,7 +816,7 @@ def _get_polymorphic_base(bases): partition_keys = OrderedDict(k for k in primary_keys.items() if k[1].partition_key) clustering_keys = OrderedDict(k for k in primary_keys.items() if not k[1].partition_key) - #setup partition key shortcut + # setup partition key shortcut if len(partition_keys) == 0: if not is_abstract: raise ModelException("at least one partition key must be defined") @@ -835,12 +841,12 @@ def _get_polymorphic_base(bases): raise ModelException("invalid clustering order {} for column {}".format(repr(v.clustering_order), v.db_field_name)) col_names.add(v.db_field_name) - #create db_name -> model name map for loading + # create db_name -> model name map for loading db_map = {} for field_name, col in column_dict.items(): db_map[col.db_field_name] = field_name - #add management members to the class + # add management members to the class attrs['_columns'] = column_dict attrs['_primary_keys'] = primary_keys attrs['_defined_columns'] = defined_columns @@ -862,29 +868,31 @@ def _get_polymorphic_base(bases): attrs['_polymorphic_column_name'] = polymorphic_column_name attrs['_polymorphic_map'] = {} if is_polymorphic_base else None - #setup class exceptions + # setup class exceptions DoesNotExistBase = None for base in bases: DoesNotExistBase = getattr(base, 'DoesNotExist', None) - if DoesNotExistBase is not None: break + if DoesNotExistBase is not None: + break + DoesNotExistBase = DoesNotExistBase or attrs.pop('DoesNotExist', BaseModel.DoesNotExist) attrs['DoesNotExist'] = type('DoesNotExist', (DoesNotExistBase,), {}) MultipleObjectsReturnedBase = None for base in bases: MultipleObjectsReturnedBase = getattr(base, 'MultipleObjectsReturned', None) - if 
MultipleObjectsReturnedBase is not None: break + if MultipleObjectsReturnedBase is not None: + break + MultipleObjectsReturnedBase = DoesNotExistBase or attrs.pop('MultipleObjectsReturned', BaseModel.MultipleObjectsReturned) attrs['MultipleObjectsReturned'] = type('MultipleObjectsReturned', (MultipleObjectsReturnedBase,), {}) - #create the class and add a QuerySet to it + # create the class and add a QuerySet to it klass = super(ModelMetaClass, cls).__new__(cls, name, bases, attrs) return klass -import six - @six.add_metaclass(ModelMetaClass) class Model(BaseModel): __abstract__ = True @@ -906,7 +914,7 @@ class Model(BaseModel): __default_ttl__ = None """ *Optional* The default ttl used by this model. - + This can be overridden by using the :meth:`~.ttl` method. """ diff --git a/cassandra/cqlengine/named.py b/cassandra/cqlengine/named.py index 0f0b86b262..108f8e6396 100644 --- a/cassandra/cqlengine/named.py +++ b/cassandra/cqlengine/named.py @@ -3,6 +3,7 @@ from cassandra.cqlengine.query import DoesNotExist as _DoesNotExist from cassandra.cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned + class QuerySetDescriptor(object): """ returns a fresh queryset for the given model @@ -63,8 +64,11 @@ class NamedTable(object): objects = QuerySetDescriptor() - class DoesNotExist(_DoesNotExist): pass - class MultipleObjectsReturned(_MultipleObjectsReturned): pass + class DoesNotExist(_DoesNotExist): + pass + + class MultipleObjectsReturned(_MultipleObjectsReturned): + pass def __init__(self, keyspace, name): self.keyspace = keyspace @@ -118,4 +122,3 @@ def table(self, name): name that belongs to this keyspace """ return NamedTable(self.name, name) - diff --git a/cassandra/cqlengine/operators.py b/cassandra/cqlengine/operators.py index e609531fa1..c8f02e149f 100644 --- a/cassandra/cqlengine/operators.py +++ b/cassandra/cqlengine/operators.py @@ -2,7 +2,9 @@ import sys -class QueryOperatorException(Exception): pass +class QueryOperatorException(Exception): 
+ pass + # move to central spot class UnicodeMixin(object): @@ -31,12 +33,14 @@ def get_operator(cls, symbol): raise QueryOperatorException("get_operator can only be called from a BaseQueryOperator subclass") if not hasattr(cls, 'opmap'): cls.opmap = {} + def _recurse(klass): if klass.symbol: cls.opmap[klass.symbol.upper()] = klass for subklass in klass.__subclasses__(): _recurse(subklass) pass + _recurse(cls) try: return cls.opmap[symbol.upper()] diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 2dcef0189b..b46a57c1a7 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1,6 +1,7 @@ import copy from datetime import datetime, timedelta import time +import six from cassandra.cqlengine import columns from cassandra.cqlengine.connection import execute, NOT_SET @@ -17,11 +18,16 @@ TransactionClause) -class QueryException(CQLEngineException): pass -class DoesNotExist(QueryException): pass -class MultipleObjectsReturned(QueryException): pass +class QueryException(CQLEngineException): + pass + + +class DoesNotExist(QueryException): + pass -import six + +class MultipleObjectsReturned(QueryException): + pass def check_applied(result): @@ -30,7 +36,7 @@ def check_applied(result): if that value is false, it means our light-weight transaction didn't applied to database. 
""" - if result and '[applied]' in result[0] and result[0]['[applied]'] == False: + if result and '[applied]' in result[0] and not result[0]['[applied]']: raise LWTException('') @@ -77,8 +83,8 @@ def __le__(self, other): class BatchType(object): - Unlogged = 'UNLOGGED' - Counter = 'COUNTER' + Unlogged = 'UNLOGGED' + Counter = 'COUNTER' class BatchQuery(object): @@ -194,8 +200,9 @@ def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): - #don't execute if there was an exception by default - if exc_type is not None and not self._execute_on_exception: return + # don't execute if there was an exception by default + if exc_type is not None and not self._execute_on_exception: + return self.execute() @@ -205,29 +212,29 @@ def __init__(self, model): super(AbstractQuerySet, self).__init__() self.model = model - #Where clause filters + # Where clause filters self._where = [] # Transaction clause filters self._transaction = [] - #ordering arguments + # ordering arguments self._order = [] self._allow_filtering = False - #CQL has a default limit of 10000, it's defined here - #because explicit is better than implicit + # CQL has a default limit of 10000, it's defined here + # because explicit is better than implicit self._limit = 10000 - #see the defer and only methods + # see the defer and only methods self._defer_fields = [] self._only_fields = [] self._values_list = False self._flat_values_list = False - #results cache + # results cache self._con = None self._cur = None self._result_cache = None @@ -265,7 +272,7 @@ def __call__(self, *args, **kwargs): def __deepcopy__(self, memo): clone = self.__class__(self.model) for k, v in self.__dict__.items(): - if k in ['_con', '_cur', '_result_cache', '_result_idx']: # don't clone these + if k in ['_con', '_cur', '_result_cache', '_result_idx']: # don't clone these clone.__dict__[k] = None elif k == '_batch': # we need to keep the same batch instance across @@ -284,7 +291,7 @@ def __len__(self): 
self._execute_query() return len(self._result_cache) - #----query generation / execution---- + # ----query generation / execution---- def _select_fields(self): """ returns the fields to select """ @@ -308,7 +315,7 @@ def _select_query(self): allow_filtering=self._allow_filtering ) - #----Reads------ + # ----Reads------ def _execute_query(self): if self._batch: @@ -330,7 +337,7 @@ def _fill_result_cache_to_idx(self, idx): self._result_idx += 1 self._result_cache[self._result_idx] = self._construct_result(self._result_cache[self._result_idx]) - #return the connection to the connection pool if we have all objects + # return the connection to the connection pool if we have all objects if self._result_cache and self._result_idx == (len(self._result_cache) - 1): self._con = None self._cur = None @@ -350,7 +357,7 @@ def __getitem__(self, s): num_results = len(self._result_cache) if isinstance(s, slice): - #calculate the amount of results that need to be loaded + # calculate the amount of results that need to be loaded end = num_results if s.step is None else s.step if end < 0: end += num_results @@ -359,11 +366,12 @@ def __getitem__(self, s): self._fill_result_cache_to_idx(end) return self._result_cache[s.start:s.stop:s.step] else: - #return the object at this index + # return the object at this index s = int(s) - #handle negative indexing - if s < 0: s += num_results + # handle negative indexing + if s < 0: + s += num_results if s >= num_results: raise IndexError @@ -453,7 +461,6 @@ def iff(self, *args, **kwargs): if not isinstance(val, Token): raise QueryException("Virtual column 'pk__token' may only be compared to Token() values") column = columns._PartitionKeysToken(self.model) - quote_field = False else: raise QueryException("Can't resolve column name: '{}'".format(col_name)) @@ -484,7 +491,7 @@ def filter(self, *args, **kwargs): Returns a QuerySet filtered on the keyword arguments """ - #add arguments to the where clause filters + # add arguments to the where clause 
filters if len([x for x in kwargs.values() if x is None]): raise CQLEngineException("None values on filter are not allowed") @@ -497,7 +504,7 @@ def filter(self, *args, **kwargs): for arg, val in kwargs.items(): col_name, col_op = self._parse_filter_arg(arg) quote_field = True - #resolve column and operator + # resolve column and operator try: column = self.model._get_column(col_name) except KeyError: @@ -519,7 +526,7 @@ def filter(self, *args, **kwargs): len(val.value), len(partition_columns))) val.set_columns(partition_columns) - #get query operator, or use equals if not supplied + # get query operator, or use equals if not supplied operator_class = BaseWhereOperator.get_operator(col_op or 'EQ') operator = operator_class() @@ -559,8 +566,7 @@ def get(self, *args, **kwargs): if len(self._result_cache) == 0: raise self.model.DoesNotExist elif len(self._result_cache) > 1: - raise self.model.MultipleObjectsReturned( - '{} objects found'.format(len(self._result_cache))) + raise self.model.MultipleObjectsReturned('{} objects found'.format(len(self._result_cache))) else: return self[0] @@ -665,7 +671,7 @@ def _only_or_defer(self, action, fields): if clone._defer_fields or clone._only_fields: raise QueryException("QuerySet alread has only or defer fields defined") - #check for strange fields + # check for strange fields missing_fields = [f for f in fields if f not in self.model._columns.keys()] if missing_fields: raise QueryException( @@ -698,7 +704,7 @@ def delete(self): """ Deletes the contents of a query """ - #validate where clause + # validate where clause partition_key = [x for x in self.model._primary_keys.values()][0] if not any([c.field == partition_key.column_name for c in self._where]): raise QueryException("The partition key must be defined on delete queries") @@ -759,14 +765,14 @@ class ModelQuerySet(AbstractQuerySet): """ def _validate_select_where(self): """ Checks that a filterset will not create invalid select statement """ - #check that there's either a 
= or IN relationship with a primary key or indexed field + # check that there's either a = or IN relationship with a primary key or indexed field equal_ops = [self.model._columns.get(w.field) for w in self._where if isinstance(w.operator, EqualsOperator)] token_comparison = any([w for w in self._where if isinstance(w.value, Token)]) if not any([w.primary_key or w.index for w in equal_ops]) and not token_comparison and not self._allow_filtering: raise QueryException('Where clauses require either a "=" or "IN" comparison with either a primary key or indexed field') if not self._allow_filtering: - #if the query is not on an indexed field + # if the query is not on an indexed field if not any([w.index for w in equal_ops]): if not any([w.partition_key for w in equal_ops]) and not token_comparison: raise QueryException('Filtering on a clustering key without a partition key is not allowed unless allow_filtering() is called on the querset') @@ -783,9 +789,9 @@ def _select_fields(self): def _get_result_constructor(self): """ Returns a function that will be used to instantiate query results """ - if not self._values_list: # we want models + if not self._values_list: # we want models return lambda rows: self.model._construct_instance(rows) - elif self._flat_values_list: # the user has requested flattened list (1 value per row) + elif self._flat_values_list: # the user has requested flattened list (1 value per row) return lambda row: row.popitem()[1] else: return lambda row: self._get_row_value_list(self._only_fields, row) @@ -803,7 +809,7 @@ def _get_ordering_condition(self, colname): if column is None: raise QueryException("Can't resolve the column name: '{}'".format(colname)) - #validate the column selection + # validate the column selection if not column.primary_key: raise QueryException( "Can't order on '{}', can only order on (clustered) primary keys".format(colname)) @@ -958,7 +964,7 @@ class Row(Model): # we should not provide default values in this use case. 
val = col.validate(val) - + if val is None: nulled_columns.add(col_name) continue @@ -1072,11 +1078,11 @@ def update(self): timestamp=self._timestamp, transactions=self._transaction) for name, col in self.instance._clustering_keys.items(): null_clustering_key = null_clustering_key and col._val_is_null(getattr(self.instance, name, None)) - #get defined fields and their column names + # get defined fields and their column names for name, col in self.model._columns.items(): # if clustering key is null, don't include non static columns if null_clustering_key and not col.static and not col.partition_key: - continue + continue if not col.is_primary_key: val = getattr(self.instance, name, None) val_mgr = self.instance._values[name] @@ -1088,19 +1094,24 @@ def update(self): # don't update something if it hasn't changed if not val_mgr.changed and not isinstance(col, columns.Counter): continue - + static_changed_only = static_changed_only and col.static if isinstance(col, (columns.BaseContainerColumn, columns.Counter)): # get appropriate clause - if isinstance(col, columns.List): klass = ListUpdateClause - elif isinstance(col, columns.Map): klass = MapUpdateClause - elif isinstance(col, columns.Set): klass = SetUpdateClause - elif isinstance(col, columns.Counter): klass = CounterUpdateClause - else: raise RuntimeError + if isinstance(col, columns.List): + klass = ListUpdateClause + elif isinstance(col, columns.Map): + klass = MapUpdateClause + elif isinstance(col, columns.Set): + klass = SetUpdateClause + elif isinstance(col, columns.Counter): + klass = CounterUpdateClause + else: + raise RuntimeError # do the stuff clause = klass(col.db_field_name, val, - previous=val_mgr.previous_value, column=col) + previous=val_mgr.previous_value, column=col) if clause.get_context_size() > 0: statement.add_assignment_clause(clause) else: @@ -1145,7 +1156,7 @@ def save(self): static_save_only = static_save_only and col._val_is_null(getattr(self.instance, name, None)) for name, col in 
self.instance._columns.items(): if static_save_only and not col.static and not col.partition_key: - continue + continue val = getattr(self.instance, name, None) if col._val_is_null(val): if self.instance._values[name].changed: @@ -1171,7 +1182,9 @@ def delete(self): ds = DeleteStatement(self.column_family_name, timestamp=self._timestamp) for name, col in self.model._primary_keys.items(): - if (not col.partition_key) and (getattr(self.instance, name) is None): continue + if (not col.partition_key) and (getattr(self.instance, name) is None): + continue + ds.add_where_clause(WhereClause( col.db_field_name, EqualsOperator(), diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 917b986239..ec59c47031 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -7,7 +7,8 @@ from cassandra.cqlengine.operators import BaseWhereOperator, InOperator -class StatementException(Exception): pass +class StatementException(Exception): + pass class UnicodeMixin(object): @@ -16,6 +17,7 @@ class UnicodeMixin(object): else: __str__ = lambda x: six.text_type(x).encode('utf-8') + class ValueQuoter(UnicodeMixin): def __init__(self, value): @@ -28,7 +30,7 @@ def __unicode__(self): elif isinstance(self.value, (list, tuple)): return '[' + ', '.join([cql_quote(v) for v in self.value]) + ']' elif isinstance(self.value, dict): - return '{' + ', '.join([cql_quote(k) + ':' + cql_quote(v) for k,v in self.value.items()]) + '}' + return '{' + ', '.join([cql_quote(k) + ':' + cql_quote(v) for k, v in self.value.items()]) + '}' elif isinstance(self.value, set): return '{' + ', '.join([cql_quote(v) for v in self.value]) + '}' return cql_quote(self.value) @@ -215,7 +217,8 @@ def _analyze(self): self._analyzed = True def get_context_size(self): - if not self._analyzed: self._analyze() + if not self._analyzed: + self._analyze() if (self.previous is None and not self._assignments and self._additions is None and @@ -224,7 +227,8 @@ def 
get_context_size(self): return int(bool(self._assignments)) + int(bool(self._additions)) + int(bool(self._removals)) def update_context(self, ctx): - if not self._analyzed: self._analyze() + if not self._analyzed: + self._analyze() ctx_id = self.context_id if (self.previous is None and self._assignments is None and @@ -250,7 +254,8 @@ def __init__(self, field, value, operation=None, previous=None, column=None): self._prepend = None def __unicode__(self): - if not self._analyzed: self._analyze() + if not self._analyzed: + self._analyze() qs = [] ctx_id = self.context_id if self._assignments is not None: @@ -267,11 +272,13 @@ def __unicode__(self): return ', '.join(qs) def get_context_size(self): - if not self._analyzed: self._analyze() + if not self._analyzed: + self._analyze() return int(self._assignments is not None) + int(bool(self._append)) + int(bool(self._prepend)) def update_context(self, ctx): - if not self._analyzed: self._analyze() + if not self._analyzed: + self._analyze() ctx_id = self.context_id if self._assignments is not None: ctx[str(ctx_id)] = self._to_database(self._assignments) @@ -313,13 +320,13 @@ def _analyze(self): else: # the max start idx we want to compare - search_space = len(self.value) - max(0, len(self.previous)-1) + search_space = len(self.value) - max(0, len(self.previous) - 1) # the size of the sub lists we want to look at search_size = len(self.previous) for i in range(search_space): - #slice boundary + # slice boundary j = i + search_size sub = self.value[i:j] idx_cmp = lambda idx: self.previous[idx] == sub[idx] @@ -354,13 +361,15 @@ def _analyze(self): self._analyzed = True def get_context_size(self): - if not self._analyzed: self._analyze() + if not self._analyzed: + self._analyze() if self.previous is None and not self._updates: return 1 return len(self._updates or []) * 2 def update_context(self, ctx): - if not self._analyzed: self._analyze() + if not self._analyzed: + self._analyze() ctx_id = self.context_id if self.previous 
is None and not self._updates: ctx[str(ctx_id)] = {} @@ -372,7 +381,8 @@ def update_context(self, ctx): ctx_id += 2 def __unicode__(self): - if not self._analyzed: self._analyze() + if not self._analyzed: + self._analyze() qs = [] ctx_id = self.context_id @@ -439,16 +449,19 @@ def _analyze(self): self._analyzed = True def update_context(self, ctx): - if not self._analyzed: self._analyze() + if not self._analyzed: + self._analyze() for idx, key in enumerate(self._removals): ctx[str(self.context_id + idx)] = key def get_context_size(self): - if not self._analyzed: self._analyze() + if not self._analyzed: + self._analyze() return len(self._removals) def __unicode__(self): - if not self._analyzed: self._analyze() + if not self._analyzed: + self._analyze() return ', '.join(['"{}"[%({})s]'.format(self.field, self.context_id + i) for i in range(len(self._removals))]) @@ -521,7 +534,6 @@ def timestamp_normalized(self): def __unicode__(self): raise NotImplementedError - def __repr__(self): return self.__unicode__() @@ -645,13 +657,12 @@ def __init__(self, ttl=None, timestamp=None, if_not_exists=False): - super(InsertStatement, self).__init__( - table, - assignments=assignments, - consistency=consistency, - where=where, - ttl=ttl, - timestamp=timestamp) + super(InsertStatement, self).__init__(table, + assignments=assignments, + consistency=consistency, + where=where, + ttl=ttl, + timestamp=timestamp) self.if_not_exists = if_not_exists @@ -813,4 +824,3 @@ def __unicode__(self): qs += [self._where] return ' '.join(qs) - From 3419631439b900cdd6946a048eadcd4f20592647 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 10 Feb 2015 12:02:01 -0600 Subject: [PATCH 1216/3726] Update copyright in core files --- README.rst | 2 +- benchmarks/base.py | 2 +- benchmarks/callback_full_pipeline.py | 2 +- benchmarks/future_batches.py | 2 +- benchmarks/future_full_pipeline.py | 2 +- benchmarks/future_full_throttle.py | 2 +- benchmarks/sync.py | 2 +- cassandra/__init__.py | 2 +- 
cassandra/auth.py | 2 +- cassandra/cluster.py | 2 +- cassandra/concurrent.py | 2 +- cassandra/connection.py | 2 +- cassandra/cqltypes.py | 2 +- cassandra/decoder.py | 2 +- cassandra/encoder.py | 2 +- cassandra/io/__init__.py | 2 +- cassandra/io/asyncorereactor.py | 2 +- cassandra/io/eventletreactor.py | 2 +- cassandra/io/geventreactor.py | 2 +- cassandra/io/libevreactor.py | 2 +- cassandra/io/twistedreactor.py | 2 +- cassandra/marshal.py | 2 +- cassandra/metadata.py | 2 +- cassandra/metrics.py | 2 +- cassandra/policies.py | 2 +- cassandra/pool.py | 2 +- cassandra/protocol.py | 2 +- cassandra/query.py | 2 +- example.py | 2 +- setup.py | 2 +- tests/__init__.py | 2 +- tests/integration/__init__.py | 2 +- tests/integration/datatype_utils.py | 4 ++-- tests/integration/long/__init__.py | 2 +- tests/integration/long/test_consistency.py | 2 +- tests/integration/long/test_ipv6.py | 2 +- tests/integration/long/test_large_data.py | 2 +- tests/integration/long/test_loadbalancingpolicies.py | 2 +- tests/integration/long/test_schema.py | 2 +- tests/integration/long/utils.py | 2 +- tests/integration/standard/__init__.py | 2 +- tests/integration/standard/test_authentication.py | 2 +- tests/integration/standard/test_cluster.py | 2 +- tests/integration/standard/test_concurrent.py | 2 +- tests/integration/standard/test_connection.py | 2 +- tests/integration/standard/test_metadata.py | 2 +- tests/integration/standard/test_metrics.py | 2 +- tests/integration/standard/test_prepared_statements.py | 2 +- tests/integration/standard/test_query.py | 2 +- tests/integration/standard/test_query_paging.py | 2 +- tests/integration/standard/test_routing.py | 2 +- tests/integration/standard/test_row_factories.py | 2 +- tests/integration/standard/test_types.py | 2 +- tests/integration/standard/test_udts.py | 2 +- tests/integration/util.py | 2 +- tests/unit/__init__.py | 2 +- tests/unit/io/__init__.py | 2 +- tests/unit/io/test_asyncorereactor.py | 2 +- tests/unit/io/test_libevreactor.py | 2 +- 
tests/unit/io/test_twistedreactor.py | 2 +- tests/unit/test_connection.py | 2 +- tests/unit/test_control_connection.py | 2 +- tests/unit/test_host_connection_pool.py | 2 +- tests/unit/test_marshalling.py | 2 +- tests/unit/test_metadata.py | 2 +- tests/unit/test_orderedmap.py | 2 +- tests/unit/test_parameter_binding.py | 2 +- tests/unit/test_policies.py | 2 +- tests/unit/test_response_future.py | 2 +- tests/unit/test_sortedset.py | 2 +- tests/unit/test_types.py | 2 +- 71 files changed, 72 insertions(+), 72 deletions(-) diff --git a/README.rst b/README.rst index 7496774061..a0415c3c88 100644 --- a/README.rst +++ b/README.rst @@ -60,7 +60,7 @@ Features to be Added License ------- -Copyright 2013, 2014, 2015 DataStax +Copyright 2013-2015 DataStax Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/benchmarks/base.py b/benchmarks/base.py index 66ca724dc3..09e76a507d 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/callback_full_pipeline.py b/benchmarks/callback_full_pipeline.py index c79a6a3465..707d127c9b 100644 --- a/benchmarks/callback_full_pipeline.py +++ b/benchmarks/callback_full_pipeline.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/future_batches.py b/benchmarks/future_batches.py index df1cac9730..a816ff2686 100644 --- a/benchmarks/future_batches.py +++ b/benchmarks/future_batches.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/future_full_pipeline.py b/benchmarks/future_full_pipeline.py index 2b8eb625c8..5f3bcebe2f 100644 --- a/benchmarks/future_full_pipeline.py +++ b/benchmarks/future_full_pipeline.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/future_full_throttle.py b/benchmarks/future_full_throttle.py index bd0b9bd643..1c2059f34c 100644 --- a/benchmarks/future_full_throttle.py +++ b/benchmarks/future_full_throttle.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/sync.py b/benchmarks/sync.py index c5b7fbec43..d756a58bfe 100644 --- a/benchmarks/sync.py +++ b/benchmarks/sync.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 96f085767c..fa178592eb 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/auth.py b/cassandra/auth.py index fc13a82132..67d302a9e8 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cluster.py b/cassandra/cluster.py index b639af6992..35bb072984 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index e5ab4528e3..8d7743e1b4 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/connection.py b/cassandra/connection.py index 9a41a0d61c..928e7f6642 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 8a06560c4c..e109740f04 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/decoder.py b/cassandra/decoder.py index e303e4958c..5c44d5b7ec 100644 --- a/cassandra/decoder.py +++ b/cassandra/decoder.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 9b478901fa..ba055687f7 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/__init__.py b/cassandra/io/__init__.py index 6df2a7312b..e4b89e5f46 100644 --- a/cassandra/io/__init__.py +++ b/cassandra/io/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 626622cc81..2ac7156d61 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index 357d8634e9..ceac6a951e 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -1,5 +1,5 @@ # Copyright 2014 Symantec Corporation -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 6be43ece17..4cd9c68109 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 526b8a9d49..db11eaf8be 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index ac7f97e4c4..1a5a64e796 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/marshal.py b/cassandra/marshal.py index 1a78a97a8f..6451ab00ae 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/metadata.py b/cassandra/metadata.py index f51b9c9323..3fd267b781 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/cassandra/metrics.py b/cassandra/metrics.py index 8a07c06c8f..77ed8961e0 100644 --- a/cassandra/metrics.py +++ b/cassandra/metrics.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/policies.py b/cassandra/policies.py index d4106a8db3..0dc36af76d 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/pool.py b/cassandra/pool.py index a99724b06b..19e9550f92 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/protocol.py b/cassandra/protocol.py index af279ef959..d8e523421a 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cassandra/query.py b/cassandra/query.py index c159e7877b..e973ed2b2e 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/example.py b/example.py index 30050f32cb..c7221880b7 100644 --- a/example.py +++ b/example.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/setup.py b/setup.py index dba353fbf2..106233901b 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/__init__.py b/tests/__init__.py index 5de2c7654b..e1c1e54e11 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 9ca0266c8a..e68afc96c6 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/datatype_utils.py b/tests/integration/datatype_utils.py index 1af91b25bb..41a4c09e01 100644 --- a/tests/integration/datatype_utils.py +++ b/tests/integration/datatype_utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -136,4 +136,4 @@ def get_nonprim_sample(non_prim_type, datatype): elif non_prim_type == 'tuple': return (get_sample(datatype),) else: - raise Exception('Missing handling of non-primitive type {0}.'.format(non_prim_type)) \ No newline at end of file + raise Exception('Missing handling of non-primitive type {0}.'.format(non_prim_type)) diff --git a/tests/integration/long/__init__.py b/tests/integration/long/__init__.py index 76954be148..794d75bff4 100644 --- a/tests/integration/long/__init__.py +++ b/tests/integration/long/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index 7cb7d14a4c..b5715f8158 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_ipv6.py b/tests/integration/long/test_ipv6.py index 9d6dab1f83..3f416561ad 100644 --- a/tests/integration/long/test_ipv6.py +++ b/tests/integration/long/test_ipv6.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_large_data.py b/tests/integration/long/test_large_data.py index 9fe1a6b6fe..a38e6f1c10 100644 --- a/tests/integration/long/test_large_data.py +++ b/tests/integration/long/test_large_data.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 8bb9e3caa0..2c56f7bd89 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index bad7df2cd1..af51924045 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index ce0aa51a10..f5422ef233 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/__init__.py b/tests/integration/standard/__init__.py index 76954be148..794d75bff4 100644 --- a/tests/integration/standard/__init__.py +++ b/tests/integration/standard/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 1534947a28..c698c986e2 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 95e6082b2f..69a771f812 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index 211c44206d..a7837d8a52 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 15e2b1988b..e52280642b 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 1e46d3c55a..b14d208231 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 828ed6000c..c5f5da78ba 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index e91e0edefb..42da2170e4 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index c0db307555..8c4692477c 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index cc3dea6bc1..8e4ccfdb18 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_routing.py b/tests/integration/standard/test_routing.py index 9d09cc98f4..8172f8fc2d 100644 --- a/tests/integration/standard/test_routing.py +++ b/tests/integration/standard/test_routing.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_row_factories.py b/tests/integration/standard/test_row_factories.py index 354021f257..42fe4e260e 100644 --- a/tests/integration/standard/test_row_factories.py +++ b/tests/integration/standard/test_row_factories.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 50d556b21f..84677f9930 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 87e76b3559..1aa8ccd5fe 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration/util.py b/tests/integration/util.py index 08ebac0657..aab617843d 100644 --- a/tests/integration/util.py +++ b/tests/integration/util.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index 6df2a7312b..e4b89e5f46 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/__init__.py b/tests/unit/io/__init__.py index 6df2a7312b..e4b89e5f46 100644 --- a/tests/unit/io/__init__.py +++ b/tests/unit/io/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index c2138aa5fa..17e42a0221 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index 78e08f27a7..ddd2dc0417 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index de22415b4c..8563b948c0 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 39ef8ce5c0..fd6f12717a 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index a0cb65235d..1f1ccc1bf1 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tests/unit/test_host_connection_pool.py b/tests/unit/test_host_connection_pool.py index 826440aacb..7a351afb2e 100644 --- a/tests/unit/test_host_connection_pool.py +++ b/tests/unit/test_host_connection_pool.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index 54686e24c4..d3703c311f 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 9099037369..0c59598a23 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_orderedmap.py b/tests/unit/test_orderedmap.py index 3cee3a11c5..f5000e604a 100644 --- a/tests/unit/test_orderedmap.py +++ b/tests/unit/test_orderedmap.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index efb7ff1aed..596f291e3a 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index ba389c7662..b0f5861749 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index bdb2ccb534..027fe73214 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_sortedset.py b/tests/unit/test_sortedset.py index 161fe56b92..a2e042c7a0 100644 --- a/tests/unit/test_sortedset.py +++ b/tests/unit/test_sortedset.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 4588b85683..824dd4f9cf 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 DataStax, Inc. +# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
From 28ca35cd7b38f55103dd6b270bbc469efc2ca7da Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 10 Feb 2015 12:03:42 -0600 Subject: [PATCH 1217/3726] Prepend license to cqlengine artifacts --- cassandra/cqlengine/__init__.py | 14 ++++++++++++++ cassandra/cqlengine/columns.py | 14 ++++++++++++++ cassandra/cqlengine/connection.py | 14 ++++++++++++++ cassandra/cqlengine/exceptions.py | 14 ++++++++++++++ cassandra/cqlengine/functions.py | 14 ++++++++++++++ cassandra/cqlengine/management.py | 14 ++++++++++++++ cassandra/cqlengine/models.py | 14 ++++++++++++++ cassandra/cqlengine/named.py | 14 ++++++++++++++ cassandra/cqlengine/operators.py | 14 ++++++++++++++ cassandra/cqlengine/query.py | 14 ++++++++++++++ cassandra/cqlengine/statements.py | 14 ++++++++++++++ tests/integration/cqlengine/__init__.py | 14 ++++++++++++++ tests/integration/cqlengine/base.py | 14 ++++++++++++++ tests/integration/cqlengine/columns/__init__.py | 14 ++++++++++++++ .../cqlengine/columns/test_container_columns.py | 14 ++++++++++++++ .../cqlengine/columns/test_counter_column.py | 14 ++++++++++++++ .../cqlengine/columns/test_static_column.py | 14 ++++++++++++++ .../cqlengine/columns/test_validation.py | 14 ++++++++++++++ .../integration/cqlengine/columns/test_value_io.py | 14 ++++++++++++++ .../integration/cqlengine/connections/__init__.py | 14 ++++++++++++++ tests/integration/cqlengine/management/__init__.py | 14 ++++++++++++++ .../management/test_compaction_settings.py | 14 ++++++++++++++ .../cqlengine/management/test_management.py | 14 ++++++++++++++ tests/integration/cqlengine/model/__init__.py | 14 ++++++++++++++ .../cqlengine/model/test_class_construction.py | 14 ++++++++++++++ .../cqlengine/model/test_equality_operations.py | 14 ++++++++++++++ tests/integration/cqlengine/model/test_model.py | 14 ++++++++++++++ tests/integration/cqlengine/model/test_model_io.py | 14 ++++++++++++++ .../cqlengine/model/test_polymorphism.py | 14 ++++++++++++++ 
tests/integration/cqlengine/model/test_updates.py | 14 ++++++++++++++ .../integration/cqlengine/model/test_validation.py | 14 ++++++++++++++ .../cqlengine/model/test_value_lists.py | 14 ++++++++++++++ tests/integration/cqlengine/operators/__init__.py | 14 ++++++++++++++ .../operators/test_assignment_operators.py | 14 ++++++++++++++ .../cqlengine/operators/test_base_operator.py | 14 ++++++++++++++ .../cqlengine/operators/test_where_operators.py | 14 ++++++++++++++ tests/integration/cqlengine/query/__init__.py | 14 ++++++++++++++ .../cqlengine/query/test_batch_query.py | 14 ++++++++++++++ .../cqlengine/query/test_datetime_queries.py | 14 ++++++++++++++ tests/integration/cqlengine/query/test_named.py | 14 ++++++++++++++ .../cqlengine/query/test_queryoperators.py | 14 ++++++++++++++ tests/integration/cqlengine/query/test_queryset.py | 14 ++++++++++++++ tests/integration/cqlengine/query/test_updates.py | 14 ++++++++++++++ tests/integration/cqlengine/statements/__init__.py | 14 ++++++++++++++ .../statements/test_assignment_clauses.py | 14 ++++++++++++++ .../statements/test_assignment_statement.py | 14 ++++++++++++++ .../cqlengine/statements/test_base_clause.py | 14 ++++++++++++++ .../cqlengine/statements/test_base_statement.py | 14 ++++++++++++++ .../cqlengine/statements/test_delete_statement.py | 14 ++++++++++++++ .../cqlengine/statements/test_insert_statement.py | 14 ++++++++++++++ .../cqlengine/statements/test_quoter.py | 14 ++++++++++++++ .../cqlengine/statements/test_select_statement.py | 14 ++++++++++++++ .../cqlengine/statements/test_update_statement.py | 14 ++++++++++++++ .../cqlengine/statements/test_where_clause.py | 14 ++++++++++++++ tests/integration/cqlengine/test_batch_query.py | 14 ++++++++++++++ tests/integration/cqlengine/test_consistency.py | 14 ++++++++++++++ tests/integration/cqlengine/test_ifnotexists.py | 14 ++++++++++++++ tests/integration/cqlengine/test_load.py | 14 ++++++++++++++ tests/integration/cqlengine/test_timestamp.py | 14 ++++++++++++++ 
tests/integration/cqlengine/test_transaction.py | 14 ++++++++++++++ tests/integration/cqlengine/test_ttl.py | 14 ++++++++++++++ 61 files changed, 854 insertions(+) diff --git a/cassandra/cqlengine/__init__.py b/cassandra/cqlengine/__init__.py index 1c61c4ee8e..684a94af96 100644 --- a/cassandra/cqlengine/__init__.py +++ b/cassandra/cqlengine/__init__.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # compaction SizeTieredCompactionStrategy = "SizeTieredCompactionStrategy" LeveledCompactionStrategy = "LeveledCompactionStrategy" diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 2181413b3d..aa045db12c 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from copy import deepcopy, copy from datetime import datetime from datetime import date diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index d4b1dc96b5..ae094c2898 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from collections import namedtuple import logging import six diff --git a/cassandra/cqlengine/exceptions.py b/cassandra/cqlengine/exceptions.py index 8f60c90ac5..09327687e5 100644 --- a/cassandra/cqlengine/exceptions.py +++ b/cassandra/cqlengine/exceptions.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ class CQLEngineException(Exception): pass diff --git a/cassandra/cqlengine/functions.py b/cassandra/cqlengine/functions.py index c77032391f..b00f245661 100644 --- a/cassandra/cqlengine/functions.py +++ b/cassandra/cqlengine/functions.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from datetime import datetime import six import sys diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index d5b3f8178e..43425761b5 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from collections import namedtuple import json import logging diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index bbb4b96454..85d432b8e0 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import re import six diff --git a/cassandra/cqlengine/named.py b/cassandra/cqlengine/named.py index 108f8e6396..c8b1c4c20c 100644 --- a/cassandra/cqlengine/named.py +++ b/cassandra/cqlengine/named.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from cassandra.cqlengine.exceptions import CQLEngineException from cassandra.cqlengine.query import AbstractQueryableColumn, SimpleQuerySet from cassandra.cqlengine.query import DoesNotExist as _DoesNotExist diff --git a/cassandra/cqlengine/operators.py b/cassandra/cqlengine/operators.py index c8f02e149f..fd18f58537 100644 --- a/cassandra/cqlengine/operators.py +++ b/cassandra/cqlengine/operators.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import six import sys diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index b46a57c1a7..10da492168 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import copy from datetime import datetime, timedelta import time diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index ec59c47031..97dd4565a4 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + from datetime import datetime, timedelta import time import six diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index 22069e43e6..42cde4be2e 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from cassandra.cqlengine import connection from cassandra.cqlengine.management import create_keyspace diff --git a/tests/integration/cqlengine/base.py b/tests/integration/cqlengine/base.py index ea3f0b5989..f0b75f5bd1 100644 --- a/tests/integration/cqlengine/base.py +++ b/tests/integration/cqlengine/base.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ import sys from unittest import TestCase diff --git a/tests/integration/cqlengine/columns/__init__.py b/tests/integration/cqlengine/columns/__init__.py index e69de29bb2..55181d93fe 100644 --- a/tests/integration/cqlengine/columns/__init__.py +++ b/tests/integration/cqlengine/columns/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index 4de46c1518..5859f93629 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from datetime import datetime, timedelta import json from uuid import uuid4 diff --git a/tests/integration/cqlengine/columns/test_counter_column.py b/tests/integration/cqlengine/columns/test_counter_column.py index 074f7e52f0..caded408cb 100644 --- a/tests/integration/cqlengine/columns/test_counter_column.py +++ b/tests/integration/cqlengine/columns/test_counter_column.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from uuid import uuid4 from cassandra.cqlengine import columns diff --git a/tests/integration/cqlengine/columns/test_static_column.py b/tests/integration/cqlengine/columns/test_static_column.py index d3f537f2cf..0fa0af4b70 100644 --- a/tests/integration/cqlengine/columns/test_static_column.py +++ b/tests/integration/cqlengine/columns/test_static_column.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from unittest import skipUnless from uuid import uuid4 diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index d1dd90cc76..828e9f8504 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from datetime import datetime, timedelta, date, tzinfo from decimal import Decimal as D from unittest import TestCase diff --git a/tests/integration/cqlengine/columns/test_value_io.py b/tests/integration/cqlengine/columns/test_value_io.py index 16e34bf2cc..2ebfa89b34 100644 --- a/tests/integration/cqlengine/columns/test_value_io.py +++ b/tests/integration/cqlengine/columns/test_value_io.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from datetime import datetime, timedelta from decimal import Decimal from uuid import uuid1, uuid4, UUID diff --git a/tests/integration/cqlengine/connections/__init__.py b/tests/integration/cqlengine/connections/__init__.py index e69de29bb2..55181d93fe 100644 --- a/tests/integration/cqlengine/connections/__init__.py +++ b/tests/integration/cqlengine/connections/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/tests/integration/cqlengine/management/__init__.py b/tests/integration/cqlengine/management/__init__.py index e69de29bb2..55181d93fe 100644 --- a/tests/integration/cqlengine/management/__init__.py +++ b/tests/integration/cqlengine/management/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index 5187837a67..adb4afa784 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import copy import json from mock import patch diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 4d8df35e8b..175b3cb599 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ import mock from unittest import skipUnless diff --git a/tests/integration/cqlengine/model/__init__.py b/tests/integration/cqlengine/model/__init__.py index e69de29bb2..55181d93fe 100644 --- a/tests/integration/cqlengine/model/__init__.py +++ b/tests/integration/cqlengine/model/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index 977240b187..015d992991 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from uuid import uuid4 import warnings diff --git a/tests/integration/cqlengine/model/test_equality_operations.py b/tests/integration/cqlengine/model/test_equality_operations.py index b5e89ce154..0c098deeac 100644 --- a/tests/integration/cqlengine/model/test_equality_operations.py +++ b/tests/integration/cqlengine/model/test_equality_operations.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from unittest import skip from uuid import uuid4 from tests.integration.cqlengine.base import BaseCassEngTestCase diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index 4b11ebb34c..bdb045954a 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from unittest import TestCase from cassandra.cqlengine.models import Model, ModelDefinitionException diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 51cac3c55d..b38672d388 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from uuid import uuid4 import random from datetime import date diff --git a/tests/integration/cqlengine/model/test_polymorphism.py b/tests/integration/cqlengine/model/test_polymorphism.py index c569d48d35..f272e42fb5 100644 --- a/tests/integration/cqlengine/model/test_polymorphism.py +++ b/tests/integration/cqlengine/model/test_polymorphism.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ import uuid import mock diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index eecad304bc..71bc989d92 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from uuid import uuid4 from mock import patch diff --git a/tests/integration/cqlengine/model/test_validation.py b/tests/integration/cqlengine/model/test_validation.py index 8b13789179..fa957a1416 100644 --- a/tests/integration/cqlengine/model/test_validation.py +++ b/tests/integration/cqlengine/model/test_validation.py @@ -1 +1,15 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ diff --git a/tests/integration/cqlengine/model/test_value_lists.py b/tests/integration/cqlengine/model/test_value_lists.py index 893b7d964f..c3ddc28ea4 100644 --- a/tests/integration/cqlengine/model/test_value_lists.py +++ b/tests/integration/cqlengine/model/test_value_lists.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import random from tests.integration.cqlengine.base import BaseCassEngTestCase diff --git a/tests/integration/cqlengine/operators/__init__.py b/tests/integration/cqlengine/operators/__init__.py index f6150a6d76..33f8baac19 100644 --- a/tests/integration/cqlengine/operators/__init__.py +++ b/tests/integration/cqlengine/operators/__init__.py @@ -1 +1,15 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ __author__ = 'bdeggleston' diff --git a/tests/integration/cqlengine/operators/test_assignment_operators.py b/tests/integration/cqlengine/operators/test_assignment_operators.py index e69de29bb2..55181d93fe 100644 --- a/tests/integration/cqlengine/operators/test_assignment_operators.py +++ b/tests/integration/cqlengine/operators/test_assignment_operators.py @@ -0,0 +1,14 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/tests/integration/cqlengine/operators/test_base_operator.py b/tests/integration/cqlengine/operators/test_base_operator.py index cb92935093..9a15f829fa 100644 --- a/tests/integration/cqlengine/operators/test_base_operator.py +++ b/tests/integration/cqlengine/operators/test_base_operator.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from unittest import TestCase from cassandra.cqlengine.operators import BaseQueryOperator, QueryOperatorException diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index 5a241effcc..aa59f1390b 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from unittest import TestCase from cassandra.cqlengine.operators import * diff --git a/tests/integration/cqlengine/query/__init__.py b/tests/integration/cqlengine/query/__init__.py index e69de29bb2..55181d93fe 100644 --- a/tests/integration/cqlengine/query/__init__.py +++ b/tests/integration/cqlengine/query/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ diff --git a/tests/integration/cqlengine/query/test_batch_query.py b/tests/integration/cqlengine/query/test_batch_query.py index 93b37fb000..1247f680b2 100644 --- a/tests/integration/cqlengine/query/test_batch_query.py +++ b/tests/integration/cqlengine/query/test_batch_query.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import mock from cassandra.cqlengine import columns diff --git a/tests/integration/cqlengine/query/test_datetime_queries.py b/tests/integration/cqlengine/query/test_datetime_queries.py index 8e0ccbdfd0..f69d97c0ea 100644 --- a/tests/integration/cqlengine/query/test_datetime_queries.py +++ b/tests/integration/cqlengine/query/test_datetime_queries.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from datetime import datetime, timedelta from uuid import uuid4 diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index 90301b96f5..a06aa22d55 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from cassandra.cqlengine import operators from cassandra.cqlengine.named import NamedKeyspace from cassandra.cqlengine.operators import EqualsOperator, GreaterThanOrEqualOperator diff --git a/tests/integration/cqlengine/query/test_queryoperators.py b/tests/integration/cqlengine/query/test_queryoperators.py index 7e252209b4..02774cf2f3 100644 --- a/tests/integration/cqlengine/query/test_queryoperators.py +++ b/tests/integration/cqlengine/query/test_queryoperators.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + from datetime import datetime from cassandra.cqlengine import columns diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 240b414ac4..e10290c4e8 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from __future__ import absolute_import from datetime import datetime import time diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index 714d6419be..8c5fb95c31 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from uuid import uuid4 from cassandra.cqlengine.exceptions import ValidationError from cassandra.cqlengine.query import QueryException diff --git a/tests/integration/cqlengine/statements/__init__.py b/tests/integration/cqlengine/statements/__init__.py index f6150a6d76..33f8baac19 100644 --- a/tests/integration/cqlengine/statements/__init__.py +++ b/tests/integration/cqlengine/statements/__init__.py @@ -1 +1,15 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + __author__ = 'bdeggleston' diff --git a/tests/integration/cqlengine/statements/test_assignment_clauses.py b/tests/integration/cqlengine/statements/test_assignment_clauses.py index aea3b8132e..e322d1dbc8 100644 --- a/tests/integration/cqlengine/statements/test_assignment_clauses.py +++ b/tests/integration/cqlengine/statements/test_assignment_clauses.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from unittest import TestCase from cassandra.cqlengine.statements import AssignmentClause, SetUpdateClause, ListUpdateClause, MapUpdateClause, MapDeleteClause, FieldDeleteClause, CounterUpdateClause diff --git a/tests/integration/cqlengine/statements/test_assignment_statement.py b/tests/integration/cqlengine/statements/test_assignment_statement.py index c33e2177c8..40ba73be36 100644 --- a/tests/integration/cqlengine/statements/test_assignment_statement.py +++ b/tests/integration/cqlengine/statements/test_assignment_statement.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from unittest import TestCase from cassandra.cqlengine.statements import AssignmentStatement, StatementException diff --git a/tests/integration/cqlengine/statements/test_base_clause.py b/tests/integration/cqlengine/statements/test_base_clause.py index 969757a01d..2cb1bc2f03 100644 --- a/tests/integration/cqlengine/statements/test_base_clause.py +++ b/tests/integration/cqlengine/statements/test_base_clause.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from unittest import TestCase from cassandra.cqlengine.statements import BaseClause diff --git a/tests/integration/cqlengine/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py index f0b1793a1a..5d8e6844a4 100644 --- a/tests/integration/cqlengine/statements/test_base_statement.py +++ b/tests/integration/cqlengine/statements/test_base_statement.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from unittest import TestCase from cassandra.cqlengine.statements import BaseCQLStatement, StatementException diff --git a/tests/integration/cqlengine/statements/test_delete_statement.py b/tests/integration/cqlengine/statements/test_delete_statement.py index 79b8a78f1c..c8283e2bc5 100644 --- a/tests/integration/cqlengine/statements/test_delete_statement.py +++ b/tests/integration/cqlengine/statements/test_delete_statement.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from unittest import TestCase from cassandra.cqlengine.statements import DeleteStatement, WhereClause, MapDeleteClause from cassandra.cqlengine.operators import * diff --git a/tests/integration/cqlengine/statements/test_insert_statement.py b/tests/integration/cqlengine/statements/test_insert_statement.py index a29bc9ba3a..302e04d80b 100644 --- a/tests/integration/cqlengine/statements/test_insert_statement.py +++ b/tests/integration/cqlengine/statements/test_insert_statement.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from unittest import TestCase from cassandra.cqlengine.statements import InsertStatement, StatementException, AssignmentClause diff --git a/tests/integration/cqlengine/statements/test_quoter.py b/tests/integration/cqlengine/statements/test_quoter.py index e69de29bb2..55181d93fe 100644 --- a/tests/integration/cqlengine/statements/test_quoter.py +++ b/tests/integration/cqlengine/statements/test_quoter.py @@ -0,0 +1,14 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/tests/integration/cqlengine/statements/test_select_statement.py b/tests/integration/cqlengine/statements/test_select_statement.py index 2f8165199f..b20feab774 100644 --- a/tests/integration/cqlengine/statements/test_select_statement.py +++ b/tests/integration/cqlengine/statements/test_select_statement.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from unittest import TestCase from cassandra.cqlengine.statements import SelectStatement, WhereClause from cassandra.cqlengine.operators import * diff --git a/tests/integration/cqlengine/statements/test_update_statement.py b/tests/integration/cqlengine/statements/test_update_statement.py index 72b705d726..070710146b 100644 --- a/tests/integration/cqlengine/statements/test_update_statement.py +++ b/tests/integration/cqlengine/statements/test_update_statement.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from unittest import TestCase from cassandra.cqlengine.columns import Set, List from cassandra.cqlengine.operators import * diff --git a/tests/integration/cqlengine/statements/test_where_clause.py b/tests/integration/cqlengine/statements/test_where_clause.py index 8ad71083fe..d29bc690e2 100644 --- a/tests/integration/cqlengine/statements/test_where_clause.py +++ b/tests/integration/cqlengine/statements/test_where_clause.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + from unittest import TestCase import six from cassandra.cqlengine.operators import EqualsOperator diff --git a/tests/integration/cqlengine/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py index cd53b7f89e..85f3019ac9 100644 --- a/tests/integration/cqlengine/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sure from cassandra.cqlengine import columns diff --git a/tests/integration/cqlengine/test_consistency.py b/tests/integration/cqlengine/test_consistency.py index 1137d82845..85b2f17289 100644 --- a/tests/integration/cqlengine/test_consistency.py +++ b/tests/integration/cqlengine/test_consistency.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ import mock from uuid import uuid4 diff --git a/tests/integration/cqlengine/test_ifnotexists.py b/tests/integration/cqlengine/test_ifnotexists.py index c6402561d7..d1c5b4ec28 100644 --- a/tests/integration/cqlengine/test_ifnotexists.py +++ b/tests/integration/cqlengine/test_ifnotexists.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import mock from unittest import skipUnless from uuid import uuid4 diff --git a/tests/integration/cqlengine/test_load.py b/tests/integration/cqlengine/test_load.py index 15c93d7a2f..8c8d1d7923 100644 --- a/tests/integration/cqlengine/test_load.py +++ b/tests/integration/cqlengine/test_load.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ import gc import os import resource diff --git a/tests/integration/cqlengine/test_timestamp.py b/tests/integration/cqlengine/test_timestamp.py index eb7e6145af..5d37710ce6 100644 --- a/tests/integration/cqlengine/test_timestamp.py +++ b/tests/integration/cqlengine/test_timestamp.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from datetime import timedelta, datetime import mock import sure diff --git a/tests/integration/cqlengine/test_transaction.py b/tests/integration/cqlengine/test_transaction.py index 3b4fe68618..b2b2511a03 100644 --- a/tests/integration/cqlengine/test_transaction.py +++ b/tests/integration/cqlengine/test_transaction.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ import mock import six from unittest import skipUnless diff --git a/tests/integration/cqlengine/test_ttl.py b/tests/integration/cqlengine/test_ttl.py index 96c7d48e9f..3d091e5337 100644 --- a/tests/integration/cqlengine/test_ttl.py +++ b/tests/integration/cqlengine/test_ttl.py @@ -1,3 +1,17 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from cassandra.cqlengine.management import sync_table, drop_table from tests.integration.cqlengine.base import BaseCassEngTestCase from cassandra.cqlengine.models import Model From b2a5cb9348de66241ccf1b0fc4a4d91edd07c189 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 10 Feb 2015 13:22:32 -0600 Subject: [PATCH 1218/3726] Columns doc corrections --- cassandra/cqlengine/columns.py | 14 +++++++------- docs/api/cassandra/cqlengine/columns.rst | 4 ++-- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index aa045db12c..3be8bfefb9 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -108,7 +108,7 @@ class Column(object): primary_key = False """ bool flag, indicates this column is a primary key. 
The first primary key defined - on a model is the partition key (unless partition keys are set), all others are cluster keys + on a model is the partition key (unless partition keys are set), all others are cluster keys """ partition_key = False @@ -123,12 +123,12 @@ class Column(object): bool flag, indicates an index should be created for this column """ - db_field = False + db_field = None """ the fieldname this field will map to in the database """ - default = False + default = None """ the default value, can be a value or a callable (no args) """ @@ -136,19 +136,19 @@ class Column(object): required = False """ boolean, is the field required? Model validation will raise and - exception if required is set to True and there is a None value assigned + exception if required is set to True and there is a None value assigned """ - clustering_order = False + clustering_order = None """ only applicable on clustering keys (primary keys that are not partition keys) - determines the order that the clustering keys are sorted on disk + determines the order that the clustering keys are sorted on disk """ polymorphic_key = False """ boolean, if set to True, this column will be used for saving and loading instances - of polymorphic tables + of polymorphic tables """ static = False diff --git a/docs/api/cassandra/cqlengine/columns.rst b/docs/api/cassandra/cqlengine/columns.rst index 6148dc67b1..23a6157b22 100644 --- a/docs/api/cassandra/cqlengine/columns.rst +++ b/docs/api/cassandra/cqlengine/columns.rst @@ -17,8 +17,6 @@ Columns .. autoattribute:: primary_key - .. autoattribute:: primary_key - .. autoattribute:: partition_key .. autoattribute:: index @@ -50,6 +48,8 @@ Columns of all types are initialized by passing :class:`.Column` attributes to t .. autoclass:: Boolean(**kwargs) +.. autoclass:: Counter(**kwargs) + .. autoclass:: Date(**kwargs) .. 
autoclass:: DateTime(**kwargs) From 46ba8061faa72fb98ac87069be424c5dec06026e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 10 Feb 2015 13:57:36 -0600 Subject: [PATCH 1219/3726] Additional methods for setting up mapper session. default - use localhost connection set_session - use a session bootstrapped in a different context --- cassandra/cqlengine/connection.py | 20 +++++++++++++++++++- docs/api/cassandra/cqlengine/connection.rst | 5 ++++- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index ae094c2898..9ce8591a41 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -36,7 +36,25 @@ class CQLConnectionError(CQLEngineException): cluster = None session = None lazy_connect_args = None -default_consistency_level = None +default_consistency_level = ConsistencyLevel.ONE + + +def default(): + """ + Configures the global mapper connection to localhost, using the driver defaults + """ + global cluster, session + cluster = Cluster() + session = cluster.connect() + + +def set_session(s): + """ + Configures the global mapper connection with a preexisting :class:`cassandra.cluster.Session` + """ + global cluster, session + session = s + cluster = s.cluster def setup( diff --git a/docs/api/cassandra/cqlengine/connection.rst b/docs/api/cassandra/cqlengine/connection.rst index 960998c58e..184a6026cb 100644 --- a/docs/api/cassandra/cqlengine/connection.rst +++ b/docs/api/cassandra/cqlengine/connection.rst @@ -3,5 +3,8 @@ .. module:: cassandra.cqlengine.connection -.. autofunction:: setup +.. autofunction:: default + +.. autofunction:: set_session +.. 
autofunction:: setup From 1e9d11dbd0fa5a185291cac629cc914e1abb10fa Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 10 Feb 2015 14:01:09 -0600 Subject: [PATCH 1220/3726] cqlengine.management create keyspace API with explicit strategies --- cassandra/cqlengine/management.py | 57 +++++++++++---------- docs/api/cassandra/cqlengine/management.rst | 4 +- 2 files changed, 34 insertions(+), 27 deletions(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 43425761b5..9058b64020 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -17,6 +17,7 @@ import logging import six +from cassandra.metadata import KeyspaceMetadata from cassandra.cqlengine import SizeTieredCompactionStrategy, LeveledCompactionStrategy from cassandra.cqlengine.connection import execute, get_cluster from cassandra.cqlengine.exceptions import CQLEngineException @@ -32,10 +33,9 @@ schema_columnfamilies = NamedTable('system', 'schema_columnfamilies') -def create_keyspace(name, strategy_class, replication_factor, durable_writes=True, **replication_values): +def create_keyspace_simple(name, replication_factor, durable_writes=True): """ - *Deprecated - this will likely be repaced with something specialized per replication strategy.* - Creates a keyspace + Creates a keyspace with SimpleStrategy for replica placement If the keyspace already exists, it will not be modified. 
@@ -45,35 +45,40 @@ def create_keyspace(name, strategy_class, replication_factor, durable_writes=Tru *There are plans to guard schema-modifying functions with an environment-driven conditional.* :param str name: name of keyspace to create - :param str strategy_class: keyspace replication strategy class (:attr:`~.SimpleStrategy` or :attr:`~.NetworkTopologyStrategy` :param int replication_factor: keyspace replication factor, used with :attr:`~.SimpleStrategy` :param bool durable_writes: Write log is bypassed if set to False - :param \*\*replication_values: Additional values to ad to the replication options map """ + _create_keyspace(name, durable_writes, 'SimpleStrategy', + {'replication_factor': replication_factor}) + + +def create_keyspace_network_topology(name, dc_replication_map, durable_writes=True): + """ + Creates a keyspace with NetworkTopologyStrategy for replica placement + + If the keyspace already exists, it will not be modified. + + **This function should be used with caution, especially in production environments. + Take care to execute schema modifications in a single context (i.e. 
not concurrently with other clients).** + + *There are plans to guard schema-modifying functions with an environment-driven conditional.* + + :param str name: name of keyspace to create + :param dict dc_replication_map: map of dc_names: replication_factor + :param bool durable_writes: Write log is bypassed if set to False + """ + _create_keyspace(name, durable_writes, 'NetworkTopologyStrategy', dc_replication_map) + + +def _create_keyspace(name, durable_writes, strategy_class, strategy_options): cluster = get_cluster() if name not in cluster.metadata.keyspaces: - # try the 1.2 method - replication_map = { - 'class': strategy_class, - 'replication_factor': replication_factor - } - replication_map.update(replication_values) - if strategy_class.lower() != 'simplestrategy': - # Although the Cassandra documentation states for `replication_factor` - # that it is "Required if class is SimpleStrategy; otherwise, - # not used." we get an error if it is present. - replication_map.pop('replication_factor', None) - - query = """ - CREATE KEYSPACE {} - WITH REPLICATION = {} - """.format(name, json.dumps(replication_map).replace('"', "'")) - - if strategy_class != 'SimpleStrategy': - query += " AND DURABLE_WRITES = {}".format('true' if durable_writes else 'false') - - execute(query) + log.info("Creating keyspace %s ", name) + ks_meta = KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) + execute(ks_meta.as_cql_query()) + else: + log.info("Not creating keyspace %s because it already exists", name) def delete_keyspace(name): diff --git a/docs/api/cassandra/cqlengine/management.rst b/docs/api/cassandra/cqlengine/management.rst index d1a82d7d09..bacc9fc71a 100644 --- a/docs/api/cassandra/cqlengine/management.rst +++ b/docs/api/cassandra/cqlengine/management.rst @@ -5,7 +5,9 @@ A collection of functions for managing keyspace and table schema. -.. autofunction:: create_keyspace +.. autofunction:: create_keyspace_simple + +.. 
autofunction:: create_keyspace_network_topology .. autofunction:: delete_keyspace From 44f061c3b12e47e5144da1b1217633d2b8738a80 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 10 Feb 2015 14:04:09 -0600 Subject: [PATCH 1221/3726] delete_keyspace --> drop_keyspace make verb match CQL --- cassandra/cqlengine/management.py | 8 ++++---- docs/api/cassandra/cqlengine/management.rst | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 9058b64020..f9c1aa3d5f 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -81,16 +81,16 @@ def _create_keyspace(name, durable_writes, strategy_class, strategy_options): log.info("Not creating keyspace %s because it already exists", name) -def delete_keyspace(name): +def drop_keyspace(name): """ + Drops a keyspace, if it exists. + *There are plans to guard schema-modifying functions with an environment-driven conditional.* **This function should be used with caution, especially in production environments. Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).** - Drops a keyspace, if it exists. - - :param str name: name of keyspace to delete + :param str name: name of keyspace to drop """ cluster = get_cluster() if name in cluster.metadata.keyspaces: diff --git a/docs/api/cassandra/cqlengine/management.rst b/docs/api/cassandra/cqlengine/management.rst index bacc9fc71a..1a00d8ad06 100644 --- a/docs/api/cassandra/cqlengine/management.rst +++ b/docs/api/cassandra/cqlengine/management.rst @@ -9,7 +9,7 @@ A collection of functions for managing keyspace and table schema. .. autofunction:: create_keyspace_network_topology -.. autofunction:: delete_keyspace +.. autofunction:: drop_keyspace .. 
autofunction:: sync_table From cfd8091b478b780c3317683385ff5e5f486f3cec Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 10 Feb 2015 14:52:21 -0600 Subject: [PATCH 1222/3726] Deprecation warnings for old management functions --- cassandra/cqlengine/management.py | 55 +++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index f9c1aa3d5f..7c7fcff44b 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -16,6 +16,7 @@ import json import logging import six +import warnings from cassandra.metadata import KeyspaceMetadata from cassandra.cqlengine import SizeTieredCompactionStrategy, LeveledCompactionStrategy @@ -33,6 +34,55 @@ schema_columnfamilies = NamedTable('system', 'schema_columnfamilies') +def create_keyspace(name, strategy_class, replication_factor, durable_writes=True, **replication_values): + """ + *Deprecated - use :func:`create_keyspace_simple` or :func:`create_keyspace_network_topology` instead* + + Creates a keyspace + + If the keyspace already exists, it will not be modified. + + **This function should be used with caution, especially in production environments. + Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).** + + *There are plans to guard schema-modifying functions with an environment-driven conditional.* + + :param str name: name of keyspace to create + :param str strategy_class: keyspace replication strategy class (:attr:`~.SimpleStrategy` or :attr:`~.NetworkTopologyStrategy` + :param int replication_factor: keyspace replication factor, used with :attr:`~.SimpleStrategy` + :param bool durable_writes: Write log is bypassed if set to False + :param \*\*replication_values: Additional values to ad to the replication options map + """ + msg = "Deprecated. 
Use create_keyspace_simple or create_keyspace_network_topology instead" + warnings.warn(msg, DeprecationWarning) + log.warn(msg) + + cluster = get_cluster() + + if name not in cluster.metadata.keyspaces: + # try the 1.2 method + replication_map = { + 'class': strategy_class, + 'replication_factor': replication_factor + } + replication_map.update(replication_values) + if strategy_class.lower() != 'simplestrategy': + # Although the Cassandra documentation states for `replication_factor` + # that it is "Required if class is SimpleStrategy; otherwise, + # not used." we get an error if it is present. + replication_map.pop('replication_factor', None) + + query = """ + CREATE KEYSPACE {} + WITH REPLICATION = {} + """.format(name, json.dumps(replication_map).replace('"', "'")) + + if strategy_class != 'SimpleStrategy': + query += " AND DURABLE_WRITES = {}".format('true' if durable_writes else 'false') + + execute(query) + + def create_keyspace_simple(name, replication_factor, durable_writes=True): """ Creates a keyspace with SimpleStrategy for replica placement @@ -80,6 +130,11 @@ def _create_keyspace(name, durable_writes, strategy_class, strategy_options): else: log.info("Not creating keyspace %s because it already exists", name) +def delete_keyspace(name): + msg = "Deprecated. 
Use drop_keyspace instead" + warnings.warn(msg, DeprecationWarning) + log.warn(msg) + drop_keyspace(name) def drop_keyspace(name): """ From 13cc0ea14b721c22c71200f858f07a22509ed687 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 10 Feb 2015 16:47:41 -0600 Subject: [PATCH 1223/3726] Add mapper example at root, rename existing example --- cassandra/cqlengine/connection.py | 7 ++ example.py => example_core.py | 0 example_mapper.py | 106 ++++++++++++++++++++++++++++++ 3 files changed, 113 insertions(+) rename example.py => example_core.py (100%) create mode 100755 example_mapper.py diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 9ce8591a41..6c5d41526d 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -42,17 +42,24 @@ class CQLConnectionError(CQLEngineException): def default(): """ Configures the global mapper connection to localhost, using the driver defaults + (except for row_factory) """ global cluster, session cluster = Cluster() session = cluster.connect() + session.row_factory = dict_factory def set_session(s): """ Configures the global mapper connection with a preexisting :class:`cassandra.cluster.Session` + + Note: the mapper presently requires a Session :attr:`~.row_factory` set to ``dict_factory``. + This may be relaxed in the future """ global cluster, session + if s.row_factory is not dict_factory: + raise CQLEngineException("Failed to initialize: 'Session.row_factory' must be 'dict_factory'.") session = s cluster = s.cluster diff --git a/example.py b/example_core.py similarity index 100% rename from example.py rename to example_core.py diff --git a/example_mapper.py b/example_mapper.py new file mode 100755 index 0000000000..eda77c2e73 --- /dev/null +++ b/example_mapper.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python + +# Copyright 2013-2015 DataStax, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +log = logging.getLogger() +log.setLevel('DEBUG') +handler = logging.StreamHandler() +handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s")) +log.addHandler(handler) + +from uuid import uuid4 + +from cassandra.cqlengine import columns +from cassandra.cqlengine import connection +from cassandra.cqlengine import management +from cassandra.cqlengine.exceptions import ValidationError +from cassandra.cqlengine.models import Model +from cassandra.cqlengine.query import BatchQuery + +KEYSPACE = "testkeyspace" + + +class FamilyMembers(Model): + __keyspace__ = KEYSPACE + id = columns.UUID(primary_key=True, default=uuid4) + surname = columns.Text(primary_key=True) + name = columns.Text(primary_key=True) + birth_year = columns.Integer() + sex = columns.Text(min_length=1, max_length=1) + + def validate(self): + super(FamilyMembers, self).validate() + if self.sex and self.sex not in 'mf': + raise ValidationError("FamilyMember.sex must be one of ['m', 'f']") + + if self.birth_year and self.sex == 'f': + raise ValidationError("FamilyMember.birth_year is set, and 'a lady never tells'") + + +def main(): + connection.default() + + # Management functions would normally be used in development, and possibly for deployments. + # They are typically not part of a core application. 
+ log.info("### creating keyspace...") + management.create_keyspace_simple(KEYSPACE, 1) + log.info("### syncing model...") + management.sync_table(FamilyMembers) + + log.info("### add entities serially") + simmons = FamilyMembers.create(surname='Simmons', name='Gene', birth_year=1949, sex='m') # default uuid is assigned + + # add members later + FamilyMembers.create(id=simmons.id, surname='Simmons', name='Nick', birth_year=1989, sex='m') + FamilyMembers.create(id=simmons.id, surname='Simmons', name='Sophie', sex='f') + # showing validation + try: + FamilyMembers.create(id=simmons.id, surname='Tweed', name='Shannon', birth_year=1957, sex='f') + except ValidationError: + log.exception('INTENTIONAL VALIDATION EXCEPTION; Failed creating instance:') + FamilyMembers.create(id=simmons.id, surname='Tweed', name='Shannon', sex='f') + + log.info("### add multiple as part of a batch") + # If creating many at one time, can use a batch to minimize round-trips + hogan_id = uuid4() + with BatchQuery() as b: + FamilyMembers.batch(b).create(id=hogan_id, surname='Hogan', name='Hulk', sex='m') + FamilyMembers.batch(b).create(id=hogan_id, surname='Hogan', name='Linda', sex='f') + FamilyMembers.batch(b).create(id=hogan_id, surname='Hogan', name='Nick', sex='m') + FamilyMembers.batch(b).create(id=hogan_id, surname='Hogan', name='Brooke', sex='f') + + log.info("### All members") + for m in FamilyMembers.all(): + print m, m.birth_year, m.sex + + log.info("### Select by partition key") + for m in FamilyMembers.objects(id=simmons.id): + print m, m.birth_year, m.sex + + log.info("### Constrain on clustering key") + for m in FamilyMembers.objects(id=simmons.id, surname=simmons.surname): + print m, m.birth_year, m.sex + + log.info("### Delete a record") + FamilyMembers(id=hogan_id, surname='Hogan', name='Linda').delete() + for m in FamilyMembers.objects(id=hogan_id): + print m, m.birth_year, m.sex + + management.drop_keyspace(KEYSPACE) + +if __name__ == "__main__": + main() From 
ef1472b3303ad22c3dc1aad2a7d63c229eb5d061 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 11 Feb 2015 11:46:26 -0600 Subject: [PATCH 1224/3726] Added some additional logging around connection setup and schema management. --- cassandra/cqlengine/connection.py | 15 +++++++++++++++ cassandra/cqlengine/exceptions.py | 1 + cassandra/cqlengine/management.py | 14 ++++++++++++-- cassandra/cqlengine/models.py | 1 - 4 files changed, 28 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 6c5d41526d..253598c25d 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -45,10 +45,16 @@ def default(): (except for row_factory) """ global cluster, session + + if session: + log.warn("configuring new connection for cqlengine when one was already set") + cluster = Cluster() session = cluster.connect() session.row_factory = dict_factory + log.debug("cqlengine connection initialized with default session to localhost") + def set_session(s): """ @@ -58,11 +64,17 @@ def set_session(s): This may be relaxed in the future """ global cluster, session + + if session: + log.warn("configuring new connection for cqlengine when one was already set") + if s.row_factory is not dict_factory: raise CQLEngineException("Failed to initialize: 'Session.row_factory' must be 'dict_factory'.") session = s cluster = s.cluster + log.debug("cqlengine connection initialized with %s", s) + def setup( hosts, @@ -104,8 +116,10 @@ def setup( cluster = Cluster(hosts, **kwargs) try: session = cluster.connect() + log.debug("cqlengine connection initialized with internally created session") except NoHostAvailable: if retry_connect: + log.warn("connect failed, setting up for re-attempt on first use") kwargs['default_keyspace'] = default_keyspace kwargs['consistency'] = consistency kwargs['lazy_connect'] = False @@ -157,6 +171,7 @@ def get_cluster(): def handle_lazy_connect(): global lazy_connect_args if 
lazy_connect_args: + log.debug("lazy connect") hosts, kwargs = lazy_connect_args lazy_connect_args = None setup(hosts, **kwargs) diff --git a/cassandra/cqlengine/exceptions.py b/cassandra/cqlengine/exceptions.py index 09327687e5..6bc6e62425 100644 --- a/cassandra/cqlengine/exceptions.py +++ b/cassandra/cqlengine/exceptions.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + class CQLEngineException(Exception): pass diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 7c7fcff44b..958d252fe2 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -130,12 +130,14 @@ def _create_keyspace(name, durable_writes, strategy_class, strategy_options): else: log.info("Not creating keyspace %s because it already exists", name) + def delete_keyspace(name): msg = "Deprecated. Use drop_keyspace instead" warnings.warn(msg, DeprecationWarning) log.warn(msg) drop_keyspace(name) + def drop_keyspace(name): """ Drops a keyspace, if it exists. @@ -156,6 +158,8 @@ def sync_table(model): """ Inspects the model and creates / updates the corresponding table and columns. + This function can only add fields that are not part of the primary key. + Note that the attributes removed from the model are not deleted on the database. They become effectively ignored by (will not show up on) the model. 
@@ -184,6 +188,7 @@ def sync_table(model): # check for an existing column family if raw_cf_name not in tables: + log.debug("sync_table creating new table %s", cf_name) qs = get_create_table(model) try: @@ -194,20 +199,26 @@ def sync_table(model): if "Cannot add already existing column family" not in unicode(ex): raise else: + log.debug("sync_table checking existing table %s", cf_name) # see if we're missing any columns fields = get_fields(model) field_names = [x.name for x in fields] + model_fields = set() for name, col in model._columns.items(): if col.primary_key or col.partition_key: continue # we can't mess with the PK + model_fields.add(name) if col.db_field_name in field_names: continue # skip columns already defined # add missing column using the column def query = "ALTER TABLE {} add {}".format(cf_name, col.get_column_def()) - log.debug(query) execute(query) + db_fields_not_in_model = model_fields.symmetric_difference(field_names) + if db_fields_not_in_model: + log.info("Table %s has fields not referenced by model: %s", cf_name, db_fields_not_in_model) + update_compaction(model) table = cluster.metadata.keyspaces[ks_name].tables[raw_cf_name] @@ -391,7 +402,6 @@ def update_compaction(model): options = json.dumps(options).replace('"', "'") cf_name = model.column_family_name() query = "ALTER TABLE {} with compaction = {}".format(cf_name, options) - log.debug(query) execute(query) return True diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 85d432b8e0..980dffc5a5 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -42,7 +42,6 @@ class hybrid_classmethod(object): Allows a method to behave as both a class method and normal instance method depending on how it's called """ - def __init__(self, clsmethod, instmethod): self.clsmethod = clsmethod self.instmethod = instmethod From 4e348952e84d0d8c9488e8a5de6aec60cd97bbab Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 11 Feb 2015 14:42:00 -0600 
Subject: [PATCH 1225/3726] Deprecate 'polymorphic' model API attributes. --- cassandra/cqlengine/columns.py | 34 ++++++-- cassandra/cqlengine/models.py | 81 ++++++++++++------- docs/api/cassandra/cqlengine/columns.rst | 2 + docs/api/cassandra/cqlengine/models.rst | 4 +- docs/cqlengine/models.rst | 32 ++++---- .../cqlengine/model/test_polymorphism.py | 8 +- 6 files changed, 103 insertions(+), 58 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 3be8bfefb9..8548355b60 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -13,16 +13,19 @@ # limitations under the License. from copy import deepcopy, copy -from datetime import datetime -from datetime import date +from datetime import date, datetime +import logging import re import six import sys +import warnings from cassandra.cqltypes import DateType from cassandra.encoder import cql_quote from cassandra.cqlengine.exceptions import ValidationError +log = logging.getLogger(__name__) + # move to central spot class UnicodeMixin(object): @@ -147,8 +150,20 @@ class Column(object): polymorphic_key = False """ - boolean, if set to True, this column will be used for saving and loading instances - of polymorphic tables + *Deprecated* + + see :attr:`~.discriminator_column` + """ + + discriminator_column = False + """ + boolean, if set to True, this column will be used for discriminating records + of inherited models. + + Should only be set on a column of an abstract model being used for inheritance. + + There may only be one discriminator column per model. See :attr:`~.__discriminator_value__` + for how to specify the value of this column on specialized models. 
""" static = False @@ -165,6 +180,7 @@ def __init__(self, required=False, clustering_order=None, polymorphic_key=False, + discriminator_column=False, static=False): self.partition_key = partition_key self.primary_key = partition_key or primary_key @@ -173,7 +189,15 @@ def __init__(self, self.default = default self.required = required self.clustering_order = clustering_order - self.polymorphic_key = polymorphic_key + + if polymorphic_key: + msg = "polymorphic_key is deprecated. Use discriminator_column instead." + warnings.warn(msg, DeprecationWarning) + log.warn(msg) + + self.discriminator_column = discriminator_column or polymorphic_key + self.polymorphic_key = self.discriminator_column + # the column name in the model definition self.column_name = None self.static = static diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 980dffc5a5..f04abfa9af 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -12,8 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import logging import re import six +import warnings from cassandra.cqlengine import columns from cassandra.cqlengine.exceptions import ModelException, CQLEngineException, ValidationError @@ -22,6 +24,8 @@ from cassandra.cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned from cassandra.util import OrderedDict +log = logging.getLogger(__name__) + class ModelDefinitionException(ModelException): pass @@ -71,14 +75,14 @@ def __get__(self, obj, model): raise CQLEngineException('cannot execute queries against abstract models') queryset = model.__queryset__(model) - # if this is a concrete polymorphic model, and the polymorphic + # if this is a concrete polymorphic model, and the discriminator # key is an indexed column, add a filter clause to only return # logical rows of the proper type if model._is_polymorphic and not model._is_polymorphic_base: - name, column = model._polymorphic_column_name, model._polymorphic_column + name, column = model._discriminator_column_name, model._discriminator_column if column.partition_key or column.index: # look for existing poly types - return queryset.filter(**{name: model.__polymorphic_key__}) + return queryset.filter(**{name: model.__discriminator_value__}) return queryset @@ -302,7 +306,8 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): __default_ttl__ = None - __polymorphic_key__ = None + __polymorphic_key__ = None # DEPRECATED + __discriminator_value__ = None # compaction options __compaction__ = None @@ -378,17 +383,17 @@ def _discover_polymorphic_submodels(cls): raise ModelException('_discover_polymorphic_submodels can only be called on polymorphic base classes') def _discover(klass): - if not klass._is_polymorphic_base and klass.__polymorphic_key__ is not None: - cls._polymorphic_map[klass.__polymorphic_key__] = klass + if not klass._is_polymorphic_base and klass.__discriminator_value__ is not None: + cls._discriminator_map[klass.__discriminator_value__] = klass for subklass in 
klass.__subclasses__(): _discover(subklass) _discover(cls) @classmethod - def _get_model_by_polymorphic_key(cls, key): + def _get_model_by_discriminator_value(cls, key): if not cls._is_polymorphic_base: - raise ModelException('_get_model_by_polymorphic_key can only be called on polymorphic base classes') - return cls._polymorphic_map.get(key) + raise ModelException('_get_model_by_discriminator_value can only be called on polymorphic base classes') + return cls._discriminator_map.get(key) @classmethod def _construct_instance(cls, values): @@ -403,20 +408,20 @@ def _construct_instance(cls, values): field_dict = dict([(cls._db_map.get(k, k), v) for k, v in items]) if cls._is_polymorphic: - poly_key = field_dict.get(cls._polymorphic_column_name) + disc_key = field_dict.get(cls._discriminator_column_name) - if poly_key is None: - raise PolyMorphicModelException('polymorphic key was not found in values') + if disc_key is None: + raise PolyMorphicModelException('discriminator value was not found in values') poly_base = cls if cls._is_polymorphic_base else cls._polymorphic_base - klass = poly_base._get_model_by_polymorphic_key(poly_key) + klass = poly_base._get_model_by_discriminator_value(disc_key) if klass is None: poly_base._discover_polymorphic_submodels() - klass = poly_base._get_model_by_polymorphic_key(poly_key) + klass = poly_base._get_model_by_discriminator_value(disc_key) if klass is None: raise PolyMorphicModelException( - 'unrecognized polymorphic key {} for class {}'.format(poly_key, poly_base.__name__) + 'unrecognized discriminator column {} for class {}'.format(disc_key, poly_base.__name__) ) if not issubclass(klass, cls): @@ -640,7 +645,7 @@ def save(self): if self._is_polymorphic_base: raise PolyMorphicModelException('cannot save polymorphic base model') else: - setattr(self, self._polymorphic_column_name, self.__polymorphic_key__) + setattr(self, self._discriminator_column_name, self.__discriminator_value__) self.validate() 
self.__dmlquery__(self.__class__, self, @@ -690,7 +695,7 @@ def update(self, **values): if self._is_polymorphic_base: raise PolyMorphicModelException('cannot update polymorphic base model') else: - setattr(self, self._polymorphic_column_name, self.__polymorphic_key__) + setattr(self, self._discriminator_column_name, self.__disciminator_value__) self.validate() self.__dmlquery__(self.__class__, self, @@ -757,8 +762,15 @@ def __new__(cls, name, bases, attrs): # short circuit __abstract__ inheritance is_abstract = attrs['__abstract__'] = attrs.get('__abstract__', False) - # short circuit __polymorphic_key__ inheritance - attrs['__polymorphic_key__'] = attrs.get('__polymorphic_key__', None) + # short circuit __discriminator_value__ inheritance + # __polymorphic_key__ is deprecated + poly_key = attrs.get('__polymorphic_key__', None) + if poly_key: + msg = '__polymorphic_key__ is deprecated. Use __discriminator_value__ instead' + warnings.warn(msg, DeprecationWarning) + log.warn(msg) + attrs['__discriminator_value__'] = attrs.get('__discriminator_value__', poly_key) + attrs['__polymorphic_key__'] = attrs['__discriminator_value__'] def _transform_column(col_name, col_obj): column_dict[col_name] = col_obj @@ -771,18 +783,18 @@ def _transform_column(col_name, col_obj): column_definitions = [(k, v) for k, v in attrs.items() if isinstance(v, columns.Column)] column_definitions = sorted(column_definitions, key=lambda x: x[1].position) - is_polymorphic_base = any([c[1].polymorphic_key for c in column_definitions]) + is_polymorphic_base = any([c[1].discriminator_column for c in column_definitions]) column_definitions = [x for x in inherited_columns.items()] + column_definitions - polymorphic_columns = [c for c in column_definitions if c[1].polymorphic_key] - is_polymorphic = len(polymorphic_columns) > 0 - if len(polymorphic_columns) > 1: - raise ModelDefinitionException('only one polymorphic_key can be defined in a model, {} found'.format(len(polymorphic_columns))) + 
discriminator_columns = [c for c in column_definitions if c[1].discriminator_column] + is_polymorphic = len(discriminator_columns) > 0 + if len(discriminator_columns) > 1: + raise ModelDefinitionException('only one discriminator_column (polymorphic_key (deprecated)) can be defined in a model, {} found'.format(len(discriminator_columns))) - polymorphic_column_name, polymorphic_column = polymorphic_columns[0] if polymorphic_columns else (None, None) + discriminator_column_name, discriminator_column = discriminator_columns[0] if discriminator_columns else (None, None) - if isinstance(polymorphic_column, (columns.BaseContainerColumn, columns.Counter)): - raise ModelDefinitionException('counter and container columns cannot be used for polymorphic keys') + if isinstance(discriminator_column, (columns.BaseContainerColumn, columns.Counter)): + raise ModelDefinitionException('counter and container columns cannot be used as discriminator columns (polymorphic_key (deprecated)) ') # find polymorphic base class polymorphic_base = None @@ -877,9 +889,9 @@ def _get_polymorphic_base(bases): attrs['_is_polymorphic_base'] = is_polymorphic_base attrs['_is_polymorphic'] = is_polymorphic attrs['_polymorphic_base'] = polymorphic_base - attrs['_polymorphic_column'] = polymorphic_column - attrs['_polymorphic_column_name'] = polymorphic_column_name - attrs['_polymorphic_map'] = {} if is_polymorphic_base else None + attrs['_discriminator_column'] = discriminator_column + attrs['_discriminator_column_name'] = discriminator_column_name + attrs['_discriminator_map'] = {} if is_polymorphic_base else None # setup class exceptions DoesNotExistBase = None @@ -933,5 +945,12 @@ class Model(BaseModel): __polymorphic_key__ = None """ - *Optional* Specifies a value for the polymorphic key when using model inheritance. 
+ *Deprecated.* + + see :attr:`~.__discriminator_value__` + """ + + __discriminator_value__ = None + """ + *Optional* Specifies a value for the discriminator column when using model inheritance. """ diff --git a/docs/api/cassandra/cqlengine/columns.rst b/docs/api/cassandra/cqlengine/columns.rst index 23a6157b22..36256bd3b4 100644 --- a/docs/api/cassandra/cqlengine/columns.rst +++ b/docs/api/cassandra/cqlengine/columns.rst @@ -31,6 +31,8 @@ Columns .. autoattribute:: polymorphic_key + .. autoattribute:: discriminator_column + .. autoattribute:: static Column Types diff --git a/docs/api/cassandra/cqlengine/models.rst b/docs/api/cassandra/cqlengine/models.rst index 6dd255f98a..f29dacbf13 100644 --- a/docs/api/cassandra/cqlengine/models.rst +++ b/docs/api/cassandra/cqlengine/models.rst @@ -35,7 +35,9 @@ Model .. autoattribute:: __polymorphic_key__ - See :ref:`table_polymorphism` for usage examples. + .. autoattribute:: __discriminator_value__ + + See :ref:`model_inheritance` for usage examples. *Each table can have its own set of configuration options. These can be specified on a model with the following attributes:* diff --git a/docs/cqlengine/models.rst b/docs/cqlengine/models.rst index 8ee30c365d..aef77f504d 100644 --- a/docs/cqlengine/models.rst +++ b/docs/cqlengine/models.rst @@ -122,10 +122,10 @@ extend the model's validation method: *Note*: while not required, the convention is to raise a ``ValidationError`` (``from cqlengine import ValidationError``) if validation fails. -.. _table_polymorphism: +.. _model_inheritance: -Table Polymorphism -================== +Model Inheritance +================= It is possible to save and load different model classes using a single CQL table. This is useful in situations where you have different object types that you want to store in a single cassandra row. 
@@ -137,7 +137,7 @@ For instance, suppose you want a table that stores rows of pets owned by an owne __table_name__ = 'pet' owner_id = UUID(primary_key=True) pet_id = UUID(primary_key=True) - pet_type = Text(polymorphic_key=True) + pet_type = Text(discriminator_column=True) name = Text() def eat(self, food): @@ -147,14 +147,14 @@ For instance, suppose you want a table that stores rows of pets owned by an owne pass class Cat(Pet): - __polymorphic_key__ = 'cat' + __discriminator_value__ = 'cat' cuteness = Float() def tear_up_couch(self): pass class Dog(Pet): - __polymorphic_key__ = 'dog' + __discriminator_value__ = 'dog' fierceness = Float() def bark_all_night(self): @@ -164,19 +164,17 @@ After calling ``sync_table`` on each of these tables, the columns defined in eac ``pet`` table. Additionally, saving ``Cat`` and ``Dog`` models will save the meta data needed to identify each row as either a cat or dog. -To setup a polymorphic model structure, follow these steps +To setup a model structure with inheritance, follow these steps -1. Create a base model with a column set as the polymorphic_key (set ``polymorphic_key=True`` in the column definition) -2. Create subclass models, and define a unique ``__polymorphic_key__`` value on each +1. Create a base model with a column set as the distriminator (``distriminator_column=True`` in the column definition) +2. Create subclass models, and define a unique ``__discriminator_value__`` value on each 3. Run ``sync_table`` on each of the sub tables -**About the polymorphic key** +**About the discriminator value** -The polymorphic key is what cqlengine uses under the covers to map logical cql rows to the appropriate model type. The -base model maintains a map of polymorphic keys to subclasses. When a polymorphic model is saved, this value is automatically -saved into the polymorphic key column. 
You can set the polymorphic key column to any column type that you like, with -the exception of container and counter columns, although ``Integer`` columns make the most sense. Additionally, if you -set ``index=True`` on your polymorphic key column, you can execute queries against polymorphic subclasses, and a +The discriminator value is what cqlengine uses under the covers to map logical cql rows to the appropriate model type. The +base model maintains a map of discriminator values to subclasses. When a specialized model is saved, its discriminator value is +automatically saved into the discriminator column. The discriminator column may be any column type except counter and container types. +Additionally, if you set ``index=True`` on your discriminator column, you can execute queries against specialized subclasses, and a ``WHERE`` clause will be automatically added to your query, returning only rows of that type. Note that you must -define a unique ``__polymorphic_key__`` value to each subclass, and that you can only assign a single polymorphic -key column per model +define a unique ``__discriminator_value__`` to each subclass, and that you can only assign a single discriminator column per model. 
diff --git a/tests/integration/cqlengine/model/test_polymorphism.py b/tests/integration/cqlengine/model/test_polymorphism.py index f272e42fb5..f40f008e7c 100644 --- a/tests/integration/cqlengine/model/test_polymorphism.py +++ b/tests/integration/cqlengine/model/test_polymorphism.py @@ -64,11 +64,11 @@ class M1(Base): assert Base._is_polymorphic_base assert not M1._is_polymorphic_base - assert Base._polymorphic_column is Base._columns['type1'] - assert M1._polymorphic_column is M1._columns['type1'] + assert Base._discriminator_column is Base._columns['type1'] + assert M1._discriminator_column is M1._columns['type1'] - assert Base._polymorphic_column_name == 'type1' - assert M1._polymorphic_column_name == 'type1' + assert Base._discriminator_column_name == 'type1' + assert M1._discriminator_column_name == 'type1' def test_table_names_are_inherited_from_poly_base(self): class Base(models.Model): From e9702534bca24af8163c508fc675b6d3634dc6d4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 11 Feb 2015 15:44:28 -0600 Subject: [PATCH 1226/3726] Reorganize exceptions, centralize UnicodeMixin --- cassandra/cqlengine/__init__.py | 18 ++++++++++++++ cassandra/cqlengine/columns.py | 12 ++-------- cassandra/cqlengine/connection.py | 11 +++++---- cassandra/cqlengine/exceptions.py | 24 ++++--------------- cassandra/cqlengine/functions.py | 12 +--------- cassandra/cqlengine/management.py | 3 +-- cassandra/cqlengine/models.py | 18 ++++++++------ cassandra/cqlengine/named.py | 2 +- cassandra/cqlengine/operators.py | 11 +-------- cassandra/cqlengine/query.py | 13 +++++++--- cassandra/cqlengine/statements.py | 9 +------ .../model/test_class_construction.py | 5 ++-- .../cqlengine/model/test_polymorphism.py | 6 ++--- .../cqlengine/query/test_datetime_queries.py | 3 +-- .../integration/cqlengine/test_ifnotexists.py | 3 +-- .../integration/cqlengine/test_transaction.py | 3 +-- 16 files changed, 65 insertions(+), 88 deletions(-) diff --git a/cassandra/cqlengine/__init__.py 
b/cassandra/cqlengine/__init__.py index 684a94af96..38a02fd3ec 100644 --- a/cassandra/cqlengine/__init__.py +++ b/cassandra/cqlengine/__init__.py @@ -12,6 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +import six + + # compaction SizeTieredCompactionStrategy = "SizeTieredCompactionStrategy" LeveledCompactionStrategy = "LeveledCompactionStrategy" @@ -21,3 +24,18 @@ CACHING_KEYS_ONLY = "KEYS_ONLY" CACHING_ROWS_ONLY = "ROWS_ONLY" CACHING_NONE = "NONE" + + +class CQLEngineException(Exception): + pass + + +class ValidationError(CQLEngineException): + pass + + +class UnicodeMixin(object): + if six.PY3: + __str__ = lambda x: x.__unicode__() + else: + __str__ = lambda x: six.text_type(x).encode('utf-8') diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 8548355b60..22bc07d39a 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -17,22 +17,14 @@ import logging import re import six -import sys import warnings from cassandra.cqltypes import DateType from cassandra.encoder import cql_quote -from cassandra.cqlengine.exceptions import ValidationError - -log = logging.getLogger(__name__) +from cassandra.cqlengine import ValidationError -# move to central spot -class UnicodeMixin(object): - if sys.version_info > (3, 0): - __str__ = lambda x: x.__unicode__() - else: - __str__ = lambda x: six.text_type(x).encode('utf-8') +log = logging.getLogger(__name__) class BaseValueManager(object): diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 253598c25d..d62063b3ba 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -19,7 +19,8 @@ from cassandra import ConsistencyLevel from cassandra.cluster import Cluster, _NOT_SET, NoHostAvailable from cassandra.query import SimpleStatement, Statement, dict_factory -from cassandra.cqlengine.exceptions import CQLEngineException, 
UndefinedKeyspaceException + +from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine.statements import BaseCQLStatement @@ -27,10 +28,6 @@ NOT_SET = _NOT_SET # required for passing timeout to Session.execute - -class CQLConnectionError(CQLEngineException): - pass - Host = namedtuple('Host', ['name', 'port']) cluster = None @@ -39,6 +36,10 @@ class CQLConnectionError(CQLEngineException): default_consistency_level = ConsistencyLevel.ONE +class UndefinedKeyspaceException(CQLEngineException): + pass + + def default(): """ Configures the global mapper connection to localhost, using the driver defaults diff --git a/cassandra/cqlengine/exceptions.py b/cassandra/cqlengine/exceptions.py index 6bc6e62425..8e80f71ae6 100644 --- a/cassandra/cqlengine/exceptions.py +++ b/cassandra/cqlengine/exceptions.py @@ -12,26 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging +import warnings -class CQLEngineException(Exception): - pass +import cassandra.cqlengine -class ModelException(CQLEngineException): - pass +CQLEngineException = cassandra.cqlengine.CQLEngineException - -class ValidationError(CQLEngineException): - pass - - -class UndefinedKeyspaceException(CQLEngineException): - pass - - -class LWTException(CQLEngineException): - pass - - -class IfNotExistsWithCounterColumn(CQLEngineException): - pass +ValidationError = cassandra.cqlengine.ValidationError diff --git a/cassandra/cqlengine/functions.py b/cassandra/cqlengine/functions.py index b00f245661..5c6e4c7cd6 100644 --- a/cassandra/cqlengine/functions.py +++ b/cassandra/cqlengine/functions.py @@ -13,18 +13,8 @@ # limitations under the License. 
from datetime import datetime -import six -import sys -from cassandra.cqlengine.exceptions import ValidationError -# move to central spot - - -class UnicodeMixin(object): - if sys.version_info > (3, 0): - __str__ = lambda x: x.__unicode__() - else: - __str__ = lambda x: six.text_type(x).encode('utf-8') +from cassandra.cqlengine import UnicodeMixin, ValidationError class QueryValue(UnicodeMixin): diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 958d252fe2..261bcabf4e 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -19,9 +19,8 @@ import warnings from cassandra.metadata import KeyspaceMetadata -from cassandra.cqlengine import SizeTieredCompactionStrategy, LeveledCompactionStrategy +from cassandra.cqlengine import CQLEngineException, SizeTieredCompactionStrategy, LeveledCompactionStrategy from cassandra.cqlengine.connection import execute, get_cluster -from cassandra.cqlengine.exceptions import CQLEngineException from cassandra.cqlengine.models import Model from cassandra.cqlengine.named import NamedTable diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index f04abfa9af..0ef949d256 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -17,8 +17,8 @@ import six import warnings +from cassandra.cqlengine import CQLEngineException, ValidationError from cassandra.cqlengine import columns -from cassandra.cqlengine.exceptions import ModelException, CQLEngineException, ValidationError from cassandra.cqlengine.query import ModelQuerySet, DMLQuery, AbstractQueryableColumn, NOT_SET from cassandra.cqlengine.query import DoesNotExist as _DoesNotExist from cassandra.cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned @@ -27,11 +27,15 @@ log = logging.getLogger(__name__) +class ModelException(CQLEngineException): + pass + + class ModelDefinitionException(ModelException): pass -class PolyMorphicModelException(ModelException): 
+class PolymorphicModelException(ModelException): pass @@ -411,7 +415,7 @@ def _construct_instance(cls, values): disc_key = field_dict.get(cls._discriminator_column_name) if disc_key is None: - raise PolyMorphicModelException('discriminator value was not found in values') + raise PolymorphicModelException('discriminator value was not found in values') poly_base = cls if cls._is_polymorphic_base else cls._polymorphic_base @@ -420,12 +424,12 @@ def _construct_instance(cls, values): poly_base._discover_polymorphic_submodels() klass = poly_base._get_model_by_discriminator_value(disc_key) if klass is None: - raise PolyMorphicModelException( + raise PolymorphicModelException( 'unrecognized discriminator column {} for class {}'.format(disc_key, poly_base.__name__) ) if not issubclass(klass, cls): - raise PolyMorphicModelException( + raise PolymorphicModelException( '{} is not a subclass of {}'.format(klass.__name__, cls.__name__) ) @@ -643,7 +647,7 @@ def save(self): # handle polymorphic models if self._is_polymorphic: if self._is_polymorphic_base: - raise PolyMorphicModelException('cannot save polymorphic base model') + raise PolymorphicModelException('cannot save polymorphic base model') else: setattr(self, self._discriminator_column_name, self.__discriminator_value__) @@ -693,7 +697,7 @@ def update(self, **values): # handle polymorphic models if self._is_polymorphic: if self._is_polymorphic_base: - raise PolyMorphicModelException('cannot update polymorphic base model') + raise PolymorphicModelException('cannot update polymorphic base model') else: setattr(self, self._discriminator_column_name, self.__disciminator_value__) diff --git a/cassandra/cqlengine/named.py b/cassandra/cqlengine/named.py index c8b1c4c20c..b38c07820e 100644 --- a/cassandra/cqlengine/named.py +++ b/cassandra/cqlengine/named.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from cassandra.cqlengine.exceptions import CQLEngineException +from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine.query import AbstractQueryableColumn, SimpleQuerySet from cassandra.cqlengine.query import DoesNotExist as _DoesNotExist from cassandra.cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned diff --git a/cassandra/cqlengine/operators.py b/cassandra/cqlengine/operators.py index fd18f58537..08c8ccb318 100644 --- a/cassandra/cqlengine/operators.py +++ b/cassandra/cqlengine/operators.py @@ -12,22 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six -import sys +from cassandra.cqlengine import UnicodeMixin class QueryOperatorException(Exception): pass -# move to central spot -class UnicodeMixin(object): - if sys.version_info > (3, 0): - __str__ = lambda x: x.__unicode__() - else: - __str__ = lambda x: six.text_type(x).encode('utf-8') - - class BaseQueryOperator(UnicodeMixin): # The symbol that identifies this operator in kwargs # ie: colname__ diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 10da492168..62000f757b 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -17,10 +17,9 @@ import time import six -from cassandra.cqlengine import columns +from cassandra.cqlengine import columns, CQLEngineException, ValidationError, UnicodeMixin from cassandra.cqlengine.connection import execute, NOT_SET -from cassandra.cqlengine.exceptions import CQLEngineException, ValidationError, LWTException, IfNotExistsWithCounterColumn -from cassandra.cqlengine.functions import Token, BaseQueryFunction, QueryValue, UnicodeMixin +from cassandra.cqlengine.functions import Token, BaseQueryFunction, QueryValue from cassandra.cqlengine.operators import (InOperator, EqualsOperator, GreaterThanOperator, GreaterThanOrEqualOperator, LessThanOperator, LessThanOrEqualOperator, BaseWhereOperator) @@ -36,6 +35,14 @@ 
class QueryException(CQLEngineException): pass +class IfNotExistsWithCounterColumn(CQLEngineException): + pass + + +class LWTException(CQLEngineException): + pass + + class DoesNotExist(QueryException): pass diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 97dd4565a4..7c9f281e90 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -15,8 +15,8 @@ from datetime import datetime, timedelta import time import six -import sys +from cassandra.cqlengine import UnicodeMixin from cassandra.cqlengine.functions import QueryValue from cassandra.cqlengine.operators import BaseWhereOperator, InOperator @@ -25,13 +25,6 @@ class StatementException(Exception): pass -class UnicodeMixin(object): - if sys.version_info > (3, 0): - __str__ = lambda x: x.__unicode__() - else: - __str__ = lambda x: six.text_type(x).encode('utf-8') - - class ValueQuoter(UnicodeMixin): def __init__(self, value): diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index 015d992991..141f6f625b 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -15,9 +15,8 @@ from uuid import uuid4 import warnings -from cassandra.cqlengine import columns -from cassandra.cqlengine.exceptions import ModelException, CQLEngineException -from cassandra.cqlengine.models import Model, ModelDefinitionException, ColumnQueryEvaluator +from cassandra.cqlengine import columns, CQLEngineException +from cassandra.cqlengine.models import Model, ModelException, ModelDefinitionException, ColumnQueryEvaluator from cassandra.cqlengine.query import ModelQuerySet, DMLQuery from tests.integration.cqlengine.base import BaseCassEngTestCase diff --git a/tests/integration/cqlengine/model/test_polymorphism.py b/tests/integration/cqlengine/model/test_polymorphism.py index f40f008e7c..cf4a72e7a8 100644 --- 
a/tests/integration/cqlengine/model/test_polymorphism.py +++ b/tests/integration/cqlengine/model/test_polymorphism.py @@ -120,7 +120,7 @@ def tearDownClass(cls): management.drop_table(Poly2) def test_saving_base_model_fails(self): - with self.assertRaises(models.PolyMorphicModelException): + with self.assertRaises(models.PolymorphicModelException): PolyBase.create() def test_saving_subclass_saves_poly_key(self): @@ -208,9 +208,9 @@ def test_subclassed_model_results_work_properly(self): assert len(list(UnindexedPoly2.objects(partition=p1.partition, cluster__in=[p2.cluster, p3.cluster]))) == 2 def test_conflicting_type_results(self): - with self.assertRaises(models.PolyMorphicModelException): + with self.assertRaises(models.PolymorphicModelException): list(UnindexedPoly1.objects(partition=self.p1.partition)) - with self.assertRaises(models.PolyMorphicModelException): + with self.assertRaises(models.PolymorphicModelException): list(UnindexedPoly2.objects(partition=self.p1.partition)) diff --git a/tests/integration/cqlengine/query/test_datetime_queries.py b/tests/integration/cqlengine/query/test_datetime_queries.py index f69d97c0ea..936f4fc78b 100644 --- a/tests/integration/cqlengine/query/test_datetime_queries.py +++ b/tests/integration/cqlengine/query/test_datetime_queries.py @@ -17,10 +17,9 @@ from tests.integration.cqlengine.base import BaseCassEngTestCase -from cassandra.cqlengine.exceptions import ModelException from cassandra.cqlengine.management import sync_table from cassandra.cqlengine.management import drop_table -from cassandra.cqlengine.models import Model +from cassandra.cqlengine.models import Model, ModelException from cassandra.cqlengine import columns from cassandra.cqlengine import query diff --git a/tests/integration/cqlengine/test_ifnotexists.py b/tests/integration/cqlengine/test_ifnotexists.py index d1c5b4ec28..9cc4d3c556 100644 --- a/tests/integration/cqlengine/test_ifnotexists.py +++ b/tests/integration/cqlengine/test_ifnotexists.py @@ -17,10 
+17,9 @@ from uuid import uuid4 from cassandra.cqlengine import columns -from cassandra.cqlengine.exceptions import LWTException, IfNotExistsWithCounterColumn from cassandra.cqlengine.management import sync_table, drop_table from cassandra.cqlengine.models import Model -from cassandra.cqlengine.query import BatchQuery +from cassandra.cqlengine.query import BatchQuery, LWTException, IfNotExistsWithCounterColumn from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration import PROTOCOL_VERSION diff --git a/tests/integration/cqlengine/test_transaction.py b/tests/integration/cqlengine/test_transaction.py index b2b2511a03..bc64dc8452 100644 --- a/tests/integration/cqlengine/test_transaction.py +++ b/tests/integration/cqlengine/test_transaction.py @@ -20,8 +20,7 @@ from cassandra.cqlengine import columns from cassandra.cqlengine.management import sync_table, drop_table from cassandra.cqlengine.models import Model -from cassandra.cqlengine.exceptions import LWTException -from cassandra.cqlengine.query import BatchQuery +from cassandra.cqlengine.query import BatchQuery, LWTException from cassandra.cqlengine.statements import TransactionClause from tests.integration.cqlengine.base import BaseCassEngTestCase From 44c3b2c25192bcf9290660632cec502119a85726 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 11 Feb 2015 15:52:50 -0600 Subject: [PATCH 1227/3726] raise if getting cluster without setting up connection Better error feedback if management functions called before connection setup. --- cassandra/cqlengine/connection.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index d62063b3ba..b14570ae6a 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -166,6 +166,8 @@ def get_session(): def get_cluster(): handle_lazy_connect() + if not cluster: + raise CQLEngineException("%s.cluster is not configured. 
Call one of the setup or default functions first." % __name__) return cluster From f753c30c541e52acf5fcd55517e62da7c54bc36f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 11 Feb 2015 16:32:33 -0600 Subject: [PATCH 1228/3726] Expand cqlengine keyspace management tests to include new functions --- tests/integration/cqlengine/__init__.py | 8 ++- .../cqlengine/management/test_management.py | 53 ++++++++++++++----- 2 files changed, 45 insertions(+), 16 deletions(-) diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index 42cde4be2e..cfc72fd883 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -12,13 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. +import warnings + from cassandra.cqlengine import connection -from cassandra.cqlengine.management import create_keyspace +from cassandra.cqlengine.management import create_keyspace_simple from tests.integration import use_single_node, PROTOCOL_VERSION def setup_package(): + warnings.simplefilter('always') # for testing warnings, make sure all are let through + use_single_node() keyspace = 'cqlengine_test' @@ -26,4 +30,4 @@ def setup_package(): protocol_version=PROTOCOL_VERSION, default_keyspace=keyspace) - create_keyspace(keyspace, replication_factor=1, strategy_class="SimpleStrategy") + create_keyspace_simple(keyspace, 1) diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 175b3cb599..f209af35f9 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -13,10 +13,12 @@ # limitations under the License. 
import mock + from unittest import skipUnless +import warnings from cassandra.cqlengine import CACHING_ALL, CACHING_NONE -from cassandra.cqlengine.connection import get_session +from cassandra.cqlengine.connection import get_session, get_cluster from cassandra.cqlengine.exceptions import CQLEngineException from cassandra.cqlengine import management from cassandra.cqlengine.management import get_fields, sync_table, drop_table @@ -27,17 +29,43 @@ from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine.query.test_queryset import TestModel -class CreateKeyspaceTest(BaseCassEngTestCase): - def test_create_succeeeds(self): - management.create_keyspace('test_keyspace', strategy_class="SimpleStrategy", replication_factor=1) - management.delete_keyspace('test_keyspace') +class KeyspaceManagementTest(BaseCassEngTestCase): + def test_create_drop_succeeeds(self): + cluster = get_cluster() -class DeleteTableTest(BaseCassEngTestCase): + keyspace_ss = 'test_ks_ss' + self.assertFalse(keyspace_ss in cluster.metadata.keyspaces) + management.create_keyspace_simple(keyspace_ss, 2) + self.assertTrue(keyspace_ss in cluster.metadata.keyspaces) - def test_multiple_deletes_dont_fail(self): - """ + management.drop_keyspace(keyspace_ss) + + self.assertFalse(keyspace_ss in cluster.metadata.keyspaces) + with warnings.catch_warnings(record=True) as w: + management.create_keyspace(keyspace_ss, strategy_class="SimpleStrategy", replication_factor=1) + self.assertEqual(len(w), 1) + self.assertEqual(w[-1].category, DeprecationWarning) + self.assertTrue(keyspace_ss in cluster.metadata.keyspaces) + + management.drop_keyspace(keyspace_ss) + self.assertFalse(keyspace_ss in cluster.metadata.keyspaces) + + keyspace_nts = 'test_ks_nts' + self.assertFalse(keyspace_nts in cluster.metadata.keyspaces) + management.create_keyspace_simple(keyspace_nts, 2) + self.assertTrue(keyspace_nts in cluster.metadata.keyspaces) + + with warnings.catch_warnings(record=True) as w: + 
management.delete_keyspace(keyspace_nts) + self.assertEqual(len(w), 1) + self.assertEqual(w[-1].category, DeprecationWarning) - """ + self.assertFalse(keyspace_nts in cluster.metadata.keyspaces) + + +class DropTableTest(BaseCassEngTestCase): + + def test_multiple_deletes_dont_fail(self): sync_table(TestModel) drop_table(TestModel) @@ -280,12 +308,9 @@ class StaticModel(Model): drop_table(StaticModel) - from mock import patch - - from cassandra.cqlengine.connection import get_session session = get_session() - with patch.object(session, "execute", wraps=session.execute) as m: + with mock.patch.object(session, "execute", wraps=session.execute) as m: sync_table(StaticModel) assert m.call_count > 0 @@ -295,7 +320,7 @@ class StaticModel(Model): # if we sync again, we should not apply an alter w/ a static sync_table(StaticModel) - with patch.object(session, "execute", wraps=session.execute) as m2: + with mock.patch.object(session, "execute", wraps=session.execute) as m2: sync_table(StaticModel) assert len(m2.call_args_list) == 1 From 192a959706da49a78c8ff28754cd750db9cf2beb Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 11 Feb 2015 16:35:06 -0600 Subject: [PATCH 1229/3726] remove vacated cqlengine/exceptions module --- cassandra/cqlengine/exceptions.py | 23 ----------------------- 1 file changed, 23 deletions(-) delete mode 100644 cassandra/cqlengine/exceptions.py diff --git a/cassandra/cqlengine/exceptions.py b/cassandra/cqlengine/exceptions.py deleted file mode 100644 index 8e80f71ae6..0000000000 --- a/cassandra/cqlengine/exceptions.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2015 DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import warnings - -import cassandra.cqlengine - - -CQLEngineException = cassandra.cqlengine.CQLEngineException - -ValidationError = cassandra.cqlengine.ValidationError From ecb90de5456b3dff3ecc0864408cab435bc78dc0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 12 Feb 2015 10:53:34 -0600 Subject: [PATCH 1230/3726] Add install warning when cqlengine present --- setup.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/setup.py b/setup.py index 106233901b..df3aef2797 100644 --- a/setup.py +++ b/setup.py @@ -14,6 +14,7 @@ from __future__ import print_function import sys +import warnings if __name__ == '__main__' and sys.argv[1] == "gevent_nosetests": print("Running gevent tests") @@ -64,6 +65,14 @@ class gevent_nosetests(nosetests): class eventlet_nosetests(nosetests): description = "run nosetests with eventlet monkey patching" +has_cqlengine = False +if __name__ == '__main__' and sys.argv[1] == "install": + try: + import cqlengine + has_cqlengine = True + except ImportError: + pass + class DocCommand(Command): @@ -271,3 +280,8 @@ def run_setup(extensions): extensions.remove(failure.ext) else: break + +if has_cqlengine: + warnings.warn("\n#######\n'cqlengine' package is present on path: %s\n" + "cqlengine is now an integrated sub-package of this driver.\n" + "It is recommended to remove this package to reduce the chance for conflicting usage" % cqlengine.__file__) From 74fdfd76a83703022fb6553c22b9d8a5e7f3c3e8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 12 Feb 2015 11:44:26 -0600 Subject: [PATCH 1231/3726] 
ENVAR warning around cqlengine schema management functions --- cassandra/cqlengine/management.py | 23 +++++++++++++++++++ .../cqlengine/management/__init__.py | 6 +++++ 2 files changed, 29 insertions(+) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 261bcabf4e..62208c3405 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -15,6 +15,7 @@ from collections import namedtuple import json import logging +import os import six import warnings @@ -24,6 +25,7 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine.named import NamedTable +CQLENG_ALLOW_SCHEMA_MANAGEMENT = 'CQLENG_ALLOW_SCHEMA_MANAGEMENT' Field = namedtuple('Field', ['name', 'type']) @@ -52,6 +54,9 @@ def create_keyspace(name, strategy_class, replication_factor, durable_writes=Tru :param bool durable_writes: Write log is bypassed if set to False :param \*\*replication_values: Additional values to ad to the replication options map """ + if not _allow_schema_modification(): + return + msg = "Deprecated. 
Use create_keyspace_simple or create_keyspace_network_topology instead" warnings.warn(msg, DeprecationWarning) log.warn(msg) @@ -120,6 +125,9 @@ def create_keyspace_network_topology(name, dc_replication_map, durable_writes=Tr def _create_keyspace(name, durable_writes, strategy_class, strategy_options): + if not _allow_schema_modification(): + return + cluster = get_cluster() if name not in cluster.metadata.keyspaces: @@ -148,6 +156,9 @@ def drop_keyspace(name): :param str name: name of keyspace to drop """ + if not _allow_schema_modification(): + return + cluster = get_cluster() if name in cluster.metadata.keyspaces: execute("DROP KEYSPACE {}".format(name)) @@ -167,6 +178,8 @@ def sync_table(model): *There are plans to guard schema-modifying functions with an environment-driven conditional.* """ + if not _allow_schema_modification(): + return if not issubclass(model, Model): raise CQLEngineException("Models must be derived from base Model.") @@ -416,6 +429,8 @@ def drop_table(model): *There are plans to guard schema-modifying functions with an environment-driven conditional.* """ + if not _allow_schema_modification(): + return # don't try to delete non existant tables meta = get_cluster().metadata @@ -428,3 +443,11 @@ def drop_table(model): execute('drop table {};'.format(model.column_family_name(include_keyspace=True))) except KeyError: pass + +def _allow_schema_modification(): + if not os.getenv(CQLENG_ALLOW_SCHEMA_MANAGEMENT): + msg = CQLENG_ALLOW_SCHEMA_MANAGEMENT + " environment variable is not set. Future versions of this package will require this variable to enable management functions." 
+ warnings.warn(msg) + log.warn(msg) + + return True diff --git a/tests/integration/cqlengine/management/__init__.py b/tests/integration/cqlengine/management/__init__.py index 55181d93fe..7415f1bdaa 100644 --- a/tests/integration/cqlengine/management/__init__.py +++ b/tests/integration/cqlengine/management/__init__.py @@ -12,3 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os + +from cassandra.cqlengine.management import CQLENG_ALLOW_SCHEMA_MANAGEMENT + +def setup_package(): + os.environ[CQLENG_ALLOW_SCHEMA_MANAGEMENT] = '1' From 8f076e966a772f9e71f4b09f8375a63c5f548a92 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 12 Feb 2015 15:11:49 -0600 Subject: [PATCH 1232/3726] Make sure column types with add'l init args are documented --- cassandra/cqlengine/columns.py | 30 ++++++++----------- docs/api/cassandra/cqlengine/columns.rst | 18 ++++------- docs/conf.py | 2 +- .../cqlengine/columns/test_validation.py | 8 ++--- 4 files changed, 24 insertions(+), 34 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 22bc07d39a..4bb3f6903a 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -318,24 +318,17 @@ class Text(Column): """ Stores a UTF-8 encoded string """ - db_type = 'text' - min_length = None - """ - Sets the minimum length of this string, for validation purposes. - Defaults to 1 if this is a ``required`` column. Otherwise, None. - """ - - max_length = None - """ - Sets the maximum length of this string, for validation purposes. 
- """ - - def __init__(self, *args, **kwargs): - self.min_length = kwargs.pop('min_length', 1 if kwargs.get('required', False) else None) - self.max_length = kwargs.pop('max_length', None) - super(Text, self).__init__(*args, **kwargs) + def __init__(self, min_length=None, max_length=None, **kwargs): + """ + :param int min_length: Sets the minimum length of this string, for validation purposes. + Defaults to 1 if this is a ``required`` column. Otherwise, None. + :param int max_lemgth: Sets the maximum length of this string, for validation purposes. + """ + self.min_length = min_length or (1 if kwargs.get('required', False) else None) + self.max_length = max_length + super(Text, self).__init__(**kwargs) def validate(self, value): value = super(Text, self).validate(value) @@ -591,7 +584,7 @@ def to_python(self, value): class Float(Column): """ - Stores a 32-bit floating point value + Stores a floating point value """ db_type = 'double' @@ -765,6 +758,9 @@ def __nonzero__(self): return bool(self.value) def __init__(self, value_type, default=list, **kwargs): + """ + :param value_type: a column class indicating the types of the value + """ return super(List, self).__init__(value_type=value_type, default=default, **kwargs) def validate(self, value): diff --git a/docs/api/cassandra/cqlengine/columns.rst b/docs/api/cassandra/cqlengine/columns.rst index 36256bd3b4..8dcf690cbd 100644 --- a/docs/api/cassandra/cqlengine/columns.rst +++ b/docs/api/cassandra/cqlengine/columns.rst @@ -50,7 +50,7 @@ Columns of all types are initialized by passing :class:`.Column` attributes to t .. autoclass:: Boolean(**kwargs) -.. autoclass:: Counter(**kwargs) +.. autoclass:: Counter .. autoclass:: Date(**kwargs) @@ -58,23 +58,17 @@ Columns of all types are initialized by passing :class:`.Column` attributes to t .. autoclass:: Decimal(**kwargs) -.. autoclass:: Float(**kwargs) +.. autoclass:: Float .. autoclass:: Integer(**kwargs) -.. autoclass:: List(**kwargs) +.. autoclass:: List -.. 
autoclass:: Map(**kwargs) +.. autoclass:: Map -.. autoclass:: Set(**kwargs) +.. autoclass:: Set -.. autoclass:: Text(**kwargs) - - In addition to the :class:Column attributes, ``Text`` columns accept the following attributes for validation: - - .. autoattribute:: min_length - - .. autoattribute:: max_length +.. autoclass:: Text .. autoclass:: TimeUUID(**kwargs) diff --git a/docs/conf.py b/docs/conf.py index 988c27ca59..782044abe2 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -55,7 +55,7 @@ release = cassandra.__version__ autodoc_member_order = 'bysource' -autoclass_content = 'class' +autoclass_content = 'both' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 828e9f8504..8d4959b80d 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -288,14 +288,14 @@ def test_default_zero_fields_validate(self): class TestText(BaseCassEngTestCase): def test_min_length(self): - #min len defaults to 1 + # not required defaults to 0 col = Text() col.validate('') - col.validate('b') - #test not required defaults to 0 - Text(required=False).validate('') + # required defaults to 1 + with self.assertRaises(ValidationError): + Text(required=True).validate('') #test arbitrary lengths Text(min_length=0).validate('') From 7f03f079b9e03be4347d6ce15cc550b0eef9e3ca Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 12 Feb 2015 15:12:42 -0600 Subject: [PATCH 1233/3726] Remove schema mgnt warning for all cqlengine tests --- tests/integration/cqlengine/__init__.py | 4 +++- tests/integration/cqlengine/management/__init__.py | 6 ------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index cfc72fd883..1d0c5234e0 100644 --- 
a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -12,16 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os import warnings from cassandra.cqlengine import connection -from cassandra.cqlengine.management import create_keyspace_simple +from cassandra.cqlengine.management import create_keyspace_simple, CQLENG_ALLOW_SCHEMA_MANAGEMENT from tests.integration import use_single_node, PROTOCOL_VERSION def setup_package(): warnings.simplefilter('always') # for testing warnings, make sure all are let through + os.environ[CQLENG_ALLOW_SCHEMA_MANAGEMENT] = '1' use_single_node() diff --git a/tests/integration/cqlengine/management/__init__.py b/tests/integration/cqlengine/management/__init__.py index 7415f1bdaa..55181d93fe 100644 --- a/tests/integration/cqlengine/management/__init__.py +++ b/tests/integration/cqlengine/management/__init__.py @@ -12,9 +12,3 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os - -from cassandra.cqlengine.management import CQLENG_ALLOW_SCHEMA_MANAGEMENT - -def setup_package(): - os.environ[CQLENG_ALLOW_SCHEMA_MANAGEMENT] = '1' From 790374ede42634111d1c7a0fbf79f82195312145 Mon Sep 17 00:00:00 2001 From: Tyler Hobbs Date: Fri, 13 Feb 2015 15:49:59 -0600 Subject: [PATCH 1234/3726] Add support for CASSANDRA-7660 This experimentally adds support for CASSANDRA-7660, which returns the indexes of bind markers that correspond to partition key columns in prepared statement response metadata. 
--- cassandra/cluster.py | 4 ++-- cassandra/protocol.py | 35 +++++++++++++++++++++++++++++++---- cassandra/query.py | 43 +++++++++++++++++++++++-------------------- 3 files changed, 56 insertions(+), 26 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index b639af6992..40d414c743 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1534,13 +1534,13 @@ def prepare(self, query): future = ResponseFuture(self, message, query=None) try: future.send_request() - query_id, column_metadata = future.result(self.default_timeout) + query_id, column_metadata, pk_indexes = future.result(self.default_timeout) except Exception: log.exception("Error preparing query:") raise prepared_statement = PreparedStatement.from_message( - query_id, column_metadata, self.cluster.metadata, query, self.keyspace, + query_id, column_metadata, pk_indexes, self.cluster.metadata, query, self.keyspace, self._protocol_version) host = future._current_host diff --git a/cassandra/protocol.py b/cassandra/protocol.py index af279ef959..f570f487b2 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -559,7 +559,7 @@ def recv_body(cls, f, protocol_version, user_type_map): ksname = read_string(f) results = ksname elif kind == RESULT_KIND_PREPARED: - results = cls.recv_results_prepared(f, user_type_map) + results = cls.recv_results_prepared(f, protocol_version, user_type_map) elif kind == RESULT_KIND_SCHEMA_CHANGE: results = cls.recv_results_schema_change(f, protocol_version) return cls(kind, results, paging_state) @@ -578,16 +578,17 @@ def recv_results_rows(cls, f, protocol_version, user_type_map): return (paging_state, (colnames, parsed_rows)) @classmethod - def recv_results_prepared(cls, f, user_type_map): + def recv_results_prepared(cls, f, protocol_version, user_type_map): query_id = read_binary_string(f) - _, column_metadata = cls.recv_results_metadata(f, user_type_map) - return (query_id, column_metadata) + column_metadata, pk_indexes = 
cls.recv_prepared_metadata(f, protocol_version, user_type_map) + return (query_id, column_metadata, pk_indexes) @classmethod def recv_results_metadata(cls, f, user_type_map): flags = read_int(f) glob_tblspec = bool(flags & cls._FLAGS_GLOBAL_TABLES_SPEC) colcount = read_int(f) + if flags & cls._HAS_MORE_PAGES_FLAG: paging_state = read_binary_longstring(f) else: @@ -608,6 +609,32 @@ def recv_results_metadata(cls, f, user_type_map): column_metadata.append((colksname, colcfname, colname, coltype)) return paging_state, column_metadata + @classmethod + def recv_prepared_metadata(cls, f, protocol_version, user_type_map): + flags = read_int(f) + glob_tblspec = bool(flags & cls._FLAGS_GLOBAL_TABLES_SPEC) + colcount = read_int(f) + pk_indexes = None + if protocol_version >= 4: + num_pk_indexes = read_int(f) + pk_indexes = [read_short(f) for _ in range(num_pk_indexes)] + + if glob_tblspec: + ksname = read_string(f) + cfname = read_string(f) + column_metadata = [] + for _ in range(colcount): + if glob_tblspec: + colksname = ksname + colcfname = cfname + else: + colksname = read_string(f) + colcfname = read_string(f) + colname = read_string(f) + coltype = cls.read_type(f, user_type_map) + column_metadata.append((colksname, colcfname, colname, coltype)) + return column_metadata, pk_indexes + @classmethod def recv_results_schema_change(cls, f, protocol_version): return EventMessage.recv_schema_change(f, protocol_version) diff --git a/cassandra/query.py b/cassandra/query.py index c159e7877b..0f7db27ed8 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -353,29 +353,32 @@ def __init__(self, column_metadata, query_id, routing_key_indexes, query, keyspa self.fetch_size = fetch_size @classmethod - def from_message(cls, query_id, column_metadata, cluster_metadata, query, prepared_keyspace, protocol_version): + def from_message(cls, query_id, column_metadata, pk_indexes, cluster_metadata, query, prepared_keyspace, protocol_version): if not column_metadata: return 
PreparedStatement(column_metadata, query_id, None, query, prepared_keyspace, protocol_version) - partition_key_columns = None - routing_key_indexes = None - - ks_name, table_name, _, _ = column_metadata[0] - ks_meta = cluster_metadata.keyspaces.get(ks_name) - if ks_meta: - table_meta = ks_meta.tables.get(table_name) - if table_meta: - partition_key_columns = table_meta.partition_key - - # make a map of {column_name: index} for each column in the statement - statement_indexes = dict((c[2], i) for i, c in enumerate(column_metadata)) - - # a list of which indexes in the statement correspond to partition key items - try: - routing_key_indexes = [statement_indexes[c.name] - for c in partition_key_columns] - except KeyError: # we're missing a partition key component in the prepared - pass # statement; just leave routing_key_indexes as None + if pk_indexes: + routing_key_indexes = pk_indexes + else: + partition_key_columns = None + routing_key_indexes = None + + ks_name, table_name, _, _ = column_metadata[0] + ks_meta = cluster_metadata.keyspaces.get(ks_name) + if ks_meta: + table_meta = ks_meta.tables.get(table_name) + if table_meta: + partition_key_columns = table_meta.partition_key + + # make a map of {column_name: index} for each column in the statement + statement_indexes = dict((c[2], i) for i, c in enumerate(column_metadata)) + + # a list of which indexes in the statement correspond to partition key items + try: + routing_key_indexes = [statement_indexes[c.name] + for c in partition_key_columns] + except KeyError: # we're missing a partition key component in the prepared + pass # statement; just leave routing_key_indexes as None return PreparedStatement(column_metadata, query_id, routing_key_indexes, query, prepared_keyspace, protocol_version) From 08c2a043d99b7b41bd5fbc976cc288e81a94c1f7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 18 Feb 2015 17:00:44 -0600 Subject: [PATCH 1235/3726] Add HostConnectionPool.open_connections prop this property required 
when metrics enabled with protocol_version=3 --- cassandra/pool.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cassandra/pool.py b/cassandra/pool.py index a99724b06b..5c7f7ea22f 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -371,6 +371,10 @@ def get_state(self): in_flights = [connection.in_flight] if connection else [] return {'shutdown': self.is_shutdown, 'open_count': open_count, 'in_flights': in_flights} + @property + def open_count(self): + connection = self._connection + return 1 if connection and not (connection.is_closed or connection.is_defunct) else 0 _MAX_SIMULTANEOUS_CREATION = 1 _MIN_TRASH_INTERVAL = 10 From 304a05beab8ce2c1917cedf53904ce69cc1a9ec1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 19 Feb 2015 14:12:14 -0600 Subject: [PATCH 1236/3726] Join thread when stopping heartbeat, add short-circuits to loop Fixes a reported shutdown issue with the thread, even though it's 'daemon = True': "Exception in thread Connection heartbeat (most likely raised during interpreter shutdown):" --- cassandra/connection.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cassandra/connection.py b/cassandra/connection.py index 9a41a0d61c..b0c334590c 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -792,6 +792,9 @@ def __init__(self, interval_sec, get_connection_holders): self.daemon = True self.start() + class ShutdownException(Exception): + pass + def run(self): self._shutdown_event.wait(self._interval) while not self._shutdown_event.is_set(): @@ -802,6 +805,7 @@ def run(self): try: for connections, owner in [(o.get_connections(), o) for o in self._get_connection_holders()]: for connection in connections: + self._raise_if_stopped() if not (connection.is_defunct or connection.is_closed): if connection.is_idle: try: @@ -815,8 +819,10 @@ def run(self): else: # make sure the owner sees this defunt/closed connection owner.return_connection(connection) + self._raise_if_stopped() for f in futures: + 
self._raise_if_stopped() connection = f.connection try: f.wait(self._interval) @@ -830,8 +836,11 @@ def run(self): failed_connections.append((f.connection, f.owner)) for connection, owner in failed_connections: + self._raise_if_stopped() connection.defunct(Exception('Connection heartbeat failure')) owner.return_connection(connection) + except self.ShutdownException: + pass except Exception: log.error("Failed connection heartbeat", exc_info=True) @@ -840,3 +849,8 @@ def run(self): def stop(self): self._shutdown_event.set() + self.join() + + def _raise_if_stopped(self): + if self._shutdown_event.is_set(): + raise self.ShutdownException() From b428008219b4be7e177f4b2e854d98cad25589ab Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 19 Feb 2015 14:46:03 -0600 Subject: [PATCH 1237/3726] Update unit tests to work with heartbeat short-circuit --- tests/unit/test_connection.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 39ef8ce5c0..ba2d1936ce 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -16,7 +16,7 @@ except ImportError: import unittest # noqa -from mock import Mock, ANY, call +from mock import Mock, ANY, call, patch import six from six import BytesIO import time @@ -282,6 +282,7 @@ def test_set_connection_class(self): self.assertEqual('test', cluster.connection_class) +@patch('cassandra.connection.ConnectionHeartbeat._raise_if_stopped') class ConnectionHeartbeatTest(unittest.TestCase): @staticmethod @@ -298,10 +299,9 @@ def run_heartbeat(self, get_holders_fun, count=2, interval=0.05): ch = ConnectionHeartbeat(interval, get_holders_fun) time.sleep(interval * count) ch.stop() - ch.join() self.assertTrue(get_holders_fun.call_count) - def test_empty_connections(self): + def test_empty_connections(self, *args): count = 3 get_holders = self.make_get_holders(1) @@ -312,7 +312,7 @@ def test_empty_connections(self): holder = 
get_holders.return_value[0] holder.get_connections.assert_has_calls([call()] * get_holders.call_count) - def test_idle_non_idle(self): + def test_idle_non_idle(self, *args): request_id = 999 # connection.send_msg(OptionsMessage(), connection.get_request_id(), self._options_callback) @@ -342,7 +342,7 @@ def send_msg(msg, req_id, msg_callback): idle_connection.send_msg.assert_has_calls([call(ANY, request_id, ANY)] * get_holders.call_count) self.assertEqual(non_idle_connection.send_msg.call_count, 0) - def test_closed_defunct(self): + def test_closed_defunct(self, *args): get_holders = self.make_get_holders(1) closed_connection = Mock(spec=Connection, in_flight=0, is_idle=False, is_defunct=False, is_closed=True) defunct_connection = Mock(spec=Connection, in_flight=0, is_idle=False, is_defunct=True, is_closed=False) @@ -358,11 +358,12 @@ def test_closed_defunct(self): self.assertEqual(closed_connection.send_msg.call_count, 0) self.assertEqual(defunct_connection.send_msg.call_count, 0) - def test_no_req_ids(self): + def test_no_req_ids(self, *args): in_flight = 3 get_holders = self.make_get_holders(1) max_connection = Mock(spec=Connection, host='localhost', + lock=Lock(), max_request_id=in_flight, in_flight=in_flight, is_idle=True, is_defunct=False, is_closed=False) holder = get_holders.return_value[0] @@ -377,7 +378,7 @@ def test_no_req_ids(self): max_connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count) holder.return_connection.assert_has_calls([call(max_connection)] * get_holders.call_count) - def test_unexpected_response(self): + def test_unexpected_response(self, *args): request_id = 999 get_holders = self.make_get_holders(1) @@ -405,7 +406,7 @@ def send_msg(msg, req_id, msg_callback): self.assertEqual(exc.args, Exception('Connection heartbeat failure').args) holder.return_connection.assert_has_calls([call(connection)] * get_holders.call_count) - def test_timeout(self): + def test_timeout(self, *args): request_id = 999 get_holders = 
self.make_get_holders(1) From 89d07149aca05f679ce581585bfa5eaca09c1410 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 20 Feb 2015 14:36:24 -0600 Subject: [PATCH 1238/3726] cqlengine: format collection db_type on init, remove redundant get_column_def --- cassandra/cqlengine/columns.py | 32 +++++--------------------------- 1 file changed, 5 insertions(+), 27 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 4bb3f6903a..fd157956d5 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -668,14 +668,6 @@ def validate(self, value): raise ValidationError("{} Collection can't have more than 65535 elements.".format(self.column_name)) return value - def get_column_def(self): - """ - Returns a column definition for CQL table definition - """ - static = "static" if self.static else "" - db_type = self.db_type.format(self.value_type.db_type) - return '{} {} {}'.format(self.cql, db_type, static) - def _val_is_null(self, val): return not val @@ -692,8 +684,6 @@ class Set(BaseContainerColumn): http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_set_t.html """ - db_type = 'set<{}>' - class Quoter(BaseContainerQuoter): def __str__(self): @@ -707,7 +697,7 @@ def __init__(self, value_type, strict=True, default=set, **kwargs): type on validation, or raise a validation error, defaults to True """ self.strict = strict - + self.db_type = 'set<{}>'.format(value_type.db_type) super(Set, self).__init__(value_type, default=default, **kwargs) def validate(self, value): @@ -746,8 +736,6 @@ class List(BaseContainerColumn): http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_list_t.html """ - db_type = 'list<{}>' - class Quoter(BaseContainerQuoter): def __str__(self): @@ -761,6 +749,7 @@ def __init__(self, value_type, default=list, **kwargs): """ :param value_type: a column class indicating the types of the value """ + self.db_type = 'list<{}>'.format(value_type.db_type) return super(List, 
self).__init__(value_type=value_type, default=default, **kwargs) def validate(self, value): @@ -792,9 +781,6 @@ class Map(BaseContainerColumn): http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_map_t.html """ - - db_type = 'map<{}, {}>' - class Quoter(BaseContainerQuoter): def __str__(self): @@ -815,6 +801,9 @@ def __init__(self, key_type, value_type, default=dict, **kwargs): :param key_type: a column class indicating the types of the key :param value_type: a column class indicating the types of the value """ + + self.db_type = 'map<{}, {}>'.format(key_type.db_type, value_type.db_type) + inheritance_comparator = issubclass if isinstance(key_type, type) else isinstance if not inheritance_comparator(key_type, Column): raise ValidationError('key_type must be a column class') @@ -831,17 +820,6 @@ def __init__(self, key_type, value_type, default=dict, **kwargs): self.key_type = self.key_col.__class__ super(Map, self).__init__(value_type, default=default, **kwargs) - def get_column_def(self): - """ - Returns a column definition for CQL table definition - """ - db_type = self.db_type.format( - self.key_type.db_type, - self.value_type.db_type - ) - static = "static" if self.static else "" - return '{} {} {}'.format(self.cql, db_type, static) - def validate(self, value): val = super(Map, self).validate(value) if val is None: From b7e5c6b999fbac9afb7435a912181565c4b45759 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 2 Mar 2015 08:42:49 -0600 Subject: [PATCH 1239/3726] Use utcfromttimestamp workaround in SimpleDateType Also improve datetime_from_timesstamp perf by always using timedelta math. Always using timedelta math also addresses the utcfromtimestamp rounding differences noted in PYTHON-230. 
--- cassandra/cqltypes.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 9ce04b004b..026024746c 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -75,12 +75,12 @@ def unix_time_from_uuid1(u): return (u.time - 0x01B21DD213814000) / 10000000.0 +DATETIME_EPOC = datetime.datetime(1970, 1, 1) + + def datetime_from_timestamp(timestamp): - if timestamp >= 0: - dt = datetime.datetime.utcfromtimestamp(timestamp) - else: - # PYTHON-119: workaround for Windows - dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp) + # PYTHON-119: workaround for Windows + dt = DATETIME_EPOC + datetime.timedelta(seconds=timestamp) return dt @@ -652,7 +652,7 @@ def serialize(val, protocol_version): @staticmethod def deserialize(byts, protocol_version): timestamp = SimpleDateType.seconds_per_day * (uint32_unpack(byts) - 2 ** 31) - dt = datetime.datetime.utcfromtimestamp(timestamp) + dt = datetime_from_timestamp(timestamp) return datetime.date(dt.year, dt.month, dt.day) From ef72d2fa752fd8ca644678d0a8631ccbdf203feb Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 2 Mar 2015 13:01:18 -0600 Subject: [PATCH 1240/3726] Removed unused members in cqlengine.AbstractQuerySet --- cassandra/cqlengine/query.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 62000f757b..15451db426 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -256,8 +256,6 @@ def __init__(self, model): self._flat_values_list = False # results cache - self._con = None - self._cur = None self._result_cache = None self._result_idx = None @@ -358,11 +356,6 @@ def _fill_result_cache_to_idx(self, idx): self._result_idx += 1 self._result_cache[self._result_idx] = self._construct_result(self._result_cache[self._result_idx]) - # return the connection to the connection pool if we have all objects - if self._result_cache 
and self._result_idx == (len(self._result_cache) - 1): - self._con = None - self._cur = None - def __iter__(self): self._execute_query() From ae568b4b7eb1f184d4a20ea4ecdc1e9b6f87c0e1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 3 Mar 2015 13:02:35 -0600 Subject: [PATCH 1241/3726] Add User Definted Types to mapper. First pass; requires tests, further documentation --- cassandra/cqlengine/columns.py | 44 ++++++ cassandra/cqlengine/connection.py | 34 ++++- cassandra/cqlengine/management.py | 90 +++++++++++- cassandra/cqlengine/models.py | 20 ++- cassandra/cqlengine/query.py | 14 +- cassandra/cqlengine/usertype.py | 144 ++++++++++++++++++++ docs/api/cassandra/cqlengine/management.rst | 2 + docs/api/cassandra/cqlengine/usertype.rst | 10 ++ docs/api/index.rst | 1 + docs/cqlengine/faq.rst | 2 +- 10 files changed, 343 insertions(+), 18 deletions(-) create mode 100644 cassandra/cqlengine/usertype.py create mode 100644 docs/api/cassandra/cqlengine/usertype.rst diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index fd157956d5..a6b1c6c5e6 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -251,6 +251,11 @@ def get_column_def(self): static = "static" if self.static else "" return '{} {} {}'.format(self.cql, self.db_type, static) + # TODO: make columns use cqltypes under the hood + # until then, this bridges the gap in using types along with cassandra.metadata for CQL generation + def cql_parameterized_type(self): + return self.db_type + def set_column_name(self, name): """ Sets the column name during document class construction @@ -279,6 +284,10 @@ def _val_is_null(self, val): """ determines if the given value equates to a null value for the given column type """ return val is None + @property + def sub_columns(self): + return [] + class Blob(Column): """ @@ -671,6 +680,10 @@ def validate(self, value): def _val_is_null(self, val): return not val + @property + def sub_columns(self): + return [self.value_col] 
+ class BaseContainerQuoter(ValueQuoter): @@ -841,6 +854,37 @@ def to_database(self, value): return value return self.Quoter({self.key_col.to_database(k): self.value_col.to_database(v) for k, v in value.items()}) + @property + def sub_columns(self): + return [self.key_col, self.value_col] + + +class UserDefinedType(Column): + """ + User Defined Type column + + http://www.datastax.com/documentation/cql/3.1/cql/cql_using/cqlUseUDT.html + """ + + def __init__(self, user_type, **kwargs): + """ + :param type user_type: specifies a :class:`~.Type` model for the column + """ + self.user_type = user_type + self.db_type = "frozen<%s>" % user_type.type_name() + super(UserDefinedType, self).__init__(**kwargs) + + @property + def sub_columns(self): + return list(self.user_type._fields.values()) + + +def resolve_udts(col_def, out_list): + for col in col_def.sub_columns: + resolve_udts(col, out_list) + if isinstance(col_def, UserDefinedType): + out_list.append(col_def.user_type) + class _PartitionKeysToken(Column): """ diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index b14570ae6a..53a5ec3a48 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -17,7 +17,7 @@ import six from cassandra import ConsistencyLevel -from cassandra.cluster import Cluster, _NOT_SET, NoHostAvailable +from cassandra.cluster import Cluster, _NOT_SET, NoHostAvailable, UserTypeDoesNotExist from cassandra.query import SimpleStatement, Statement, dict_factory from cassandra.cqlengine import CQLEngineException @@ -36,6 +36,12 @@ default_consistency_level = ConsistencyLevel.ONE +# Because type models may be registered before a connection is present, +# and because sessions may be replaced, we must register UDTs here, in order +# to have them registered when a new session is established. 
+udt_by_keyspace = {} + + class UndefinedKeyspaceException(CQLEngineException): pass @@ -54,6 +60,8 @@ def default(): session = cluster.connect() session.row_factory = dict_factory + _register_known_types(cluster) + log.debug("cqlengine connection initialized with default session to localhost") @@ -74,6 +82,8 @@ def set_session(s): session = s cluster = s.cluster + _register_known_types(cluster) + log.debug("cqlengine connection initialized with %s", s) @@ -129,6 +139,8 @@ def setup( raise session.row_factory = dict_factory + _register_known_types(cluster) + def execute(query, params=None, consistency_level=None, timeout=NOT_SET): @@ -178,3 +190,23 @@ def handle_lazy_connect(): hosts, kwargs = lazy_connect_args lazy_connect_args = None setup(hosts, **kwargs) + + +def register_udt(keyspace, type_name, klass): + try: + udt_by_keyspace[keyspace][type_name] = klass + except KeyError: + udt_by_keyspace[keyspace] = {type_name: klass} + + global cluster + if cluster: + cluster.register_user_type(keyspace, type_name, klass) + + +def _register_known_types(cluster): + for ks_name, name_type_map in udt_by_keyspace.items(): + for type_name, klass in name_type_map.items(): + try: + cluster.register_user_type(ks_name, type_name, klass) + except UserTypeDoesNotExist: + pass # new types are covered in management sync functions diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 62208c3405..66016f482d 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -19,11 +19,13 @@ import six import warnings -from cassandra.metadata import KeyspaceMetadata +from cassandra import metadata from cassandra.cqlengine import CQLEngineException, SizeTieredCompactionStrategy, LeveledCompactionStrategy +from cassandra.cqlengine import columns from cassandra.cqlengine.connection import execute, get_cluster from cassandra.cqlengine.models import Model from cassandra.cqlengine.named import NamedTable +from 
cassandra.cqlengine.usertype import UserType CQLENG_ALLOW_SCHEMA_MANAGEMENT = 'CQLENG_ALLOW_SCHEMA_MANAGEMENT' @@ -132,7 +134,7 @@ def _create_keyspace(name, durable_writes, strategy_class, strategy_options): if name not in cluster.metadata.keyspaces: log.info("Creating keyspace %s ", name) - ks_meta = KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) + ks_meta = metadata.KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) execute(ks_meta.as_cql_query()) else: log.info("Not creating keyspace %s because it already exists", name) @@ -168,6 +170,8 @@ def sync_table(model): """ Inspects the model and creates / updates the corresponding table and columns. + Any User Defined Types used in the table are implicitly synchronized. + This function can only add fields that are not part of the primary key. Note that the attributes removed from the model are not deleted on the database. @@ -198,6 +202,13 @@ def sync_table(model): keyspace = cluster.metadata.keyspaces[ks_name] tables = keyspace.tables + syncd_types = set() + for col in model._columns.values(): + udts = [] + columns.resolve_udts(col, udts) + for udt in [u for u in udts if u not in syncd_types]: + _sync_type(ks_name, udt, syncd_types) + # check for an existing column family if raw_cf_name not in tables: log.debug("sync_table creating new table %s", cf_name) @@ -216,6 +227,7 @@ def sync_table(model): fields = get_fields(model) field_names = [x.name for x in fields] model_fields = set() + # # TODO: does this work with db_name?? for name, col in model._columns.items(): if col.primary_key or col.partition_key: continue # we can't mess with the PK @@ -248,6 +260,77 @@ def sync_table(model): execute(qs) +def sync_type(ks_name, type_model): + """ + Inspects the type_model and creates / updates the corresponding type. + + Note that the attributes removed from the type_model are not deleted on the database (this operation is not supported). 
+ They become effectively ignored by (will not show up on) the type_model. + + **This function should be used with caution, especially in production environments. + Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).** + + *There are plans to guard schema-modifying functions with an environment-driven conditional.* + """ + if not _allow_schema_modification(): + return + + if not issubclass(type_model, UserType): + raise CQLEngineException("Types must be derived from base UserType.") + + _sync_type(ks_name, type_model) + + +def _sync_type(ks_name, type_model, omit_subtypes=None): + + syncd_sub_types = omit_subtypes or set() + for field in type_model._fields.values(): + udts = [] + columns.resolve_udts(field, udts) + for udt in [u for u in udts if u not in syncd_sub_types]: + _sync_type(ks_name, udt, syncd_sub_types) + syncd_sub_types.add(udt) + + type_name = type_model.type_name() + type_name_qualified = "%s.%s" % (ks_name, type_name) + + cluster = get_cluster() + + keyspace = cluster.metadata.keyspaces[ks_name] + defined_types = keyspace.user_types + + if type_name not in defined_types: + log.debug("sync_type creating new type %s", type_name_qualified) + cql = get_create_type(type_model, ks_name) + execute(cql) + type_model.register_for_keyspace(ks_name) + else: + defined_fields = defined_types[type_name].field_names + model_fields = set() + for field in type_model._fields.values(): + model_fields.add(field.db_field_name) + if field.db_field_name not in defined_fields: + execute("ALTER TYPE {} ADD {}".format(type_name_qualified, field.get_column_def())) + + if len(defined_fields) == len(model_fields): + log.info("Type %s did not require synchronization", type_name_qualified) + return + + db_fields_not_in_model = model_fields.symmetric_difference(defined_fields) + if db_fields_not_in_model: + log.info("Type %s has fields not referenced by model: %s", type_name_qualified, db_fields_not_in_model) + + 
type_model.register_for_keyspace(ks_name) + + +def get_create_type(type_model, keyspace): + type_meta = metadata.UserType(keyspace, + type_model.type_name(), + (f.db_field_name for f in type_model._fields.values()), + type_model._fields.values()) + return type_meta.as_cql_query() + + def get_create_table(model): cf_name = model.column_family_name() qs = ['CREATE TABLE {}'.format(cf_name)] @@ -439,11 +522,12 @@ def drop_table(model): raw_cf_name = model.column_family_name(include_keyspace=False) try: - table = meta.keyspaces[ks_name].tables[raw_cf_name] + meta.keyspaces[ks_name].tables[raw_cf_name] execute('drop table {};'.format(model.column_family_name(include_keyspace=True))) except KeyError: pass + def _allow_schema_modification(): if not os.getenv(CQLENG_ALLOW_SCHEMA_MANAGEMENT): msg = CQLENG_ALLOW_SCHEMA_MANAGEMENT + " environment variable is not set. Future versions of this package will require this variable to enable management functions." diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 0ef949d256..a27028d272 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -19,7 +19,8 @@ from cassandra.cqlengine import CQLEngineException, ValidationError from cassandra.cqlengine import columns -from cassandra.cqlengine.query import ModelQuerySet, DMLQuery, AbstractQueryableColumn, NOT_SET +from cassandra.cqlengine import connection +from cassandra.cqlengine import query from cassandra.cqlengine.query import DoesNotExist as _DoesNotExist from cassandra.cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned from cassandra.util import OrderedDict @@ -211,7 +212,7 @@ def __call__(self, *args, **kwargs): raise NotImplementedError -class ColumnQueryEvaluator(AbstractQueryableColumn): +class ColumnQueryEvaluator(query.AbstractQueryableColumn): """ Wraps a column and allows it to be used in comparator expressions, returning query operators @@ -330,8 +331,8 @@ class 
MultipleObjectsReturned(_MultipleObjectsReturned): # end compaction # the queryset class used for this class - __queryset__ = ModelQuerySet - __dmlquery__ = DMLQuery + __queryset__ = query.ModelQuerySet + __dmlquery__ = query.DMLQuery __consistency__ = None # can be set per query @@ -371,7 +372,7 @@ def __init__(self, **values): # that update should be used when persisting changes self._is_persisted = False self._batch = None - self._timeout = NOT_SET + self._timeout = connection.NOT_SET def __repr__(self): """ @@ -741,7 +742,7 @@ def _class_batch(cls, batch): return cls.objects.batch(batch) def _inst_batch(self, batch): - assert self._timeout is NOT_SET, 'Setting both timeout and batch is not supported' + assert self._timeout is connection.NOT_SET, 'Setting both timeout and batch is not supported' self._batch = batch return self @@ -919,6 +920,13 @@ def _get_polymorphic_base(bases): # create the class and add a QuerySet to it klass = super(ModelMetaClass, cls).__new__(cls, name, bases, attrs) + udts = [] + for col in column_dict.values(): + columns.resolve_udts(col, udts) + + for user_type in set(udts): + user_type.register_for_keyspace(klass._get_keyspace()) + return klass diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 15451db426..2a5c9744dc 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -18,7 +18,7 @@ import six from cassandra.cqlengine import columns, CQLEngineException, ValidationError, UnicodeMixin -from cassandra.cqlengine.connection import execute, NOT_SET +from cassandra.cqlengine import connection from cassandra.cqlengine.functions import Token, BaseQueryFunction, QueryValue from cassandra.cqlengine.operators import (InOperator, EqualsOperator, GreaterThanOperator, GreaterThanOrEqualOperator, LessThanOperator, @@ -117,7 +117,7 @@ class BatchQuery(object): _consistency = None def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on_exception=False, - timeout=NOT_SET): 
+ timeout=connection.NOT_SET): """ :param batch_type: (optional) One of batch type values available through BatchType enum :type batch_type: str or None @@ -211,7 +211,7 @@ def execute(self): query_list.append('APPLY BATCH;') - tmp = execute('\n'.join(query_list), parameters, self._consistency, self._timeout) + tmp = connection.execute('\n'.join(query_list), parameters, self._consistency, self._timeout) check_applied(tmp) self.queries = [] @@ -264,7 +264,7 @@ def __init__(self, model): self._consistency = None self._timestamp = None self._if_not_exists = False - self._timeout = NOT_SET + self._timeout = connection.NOT_SET @property def column_family_name(self): @@ -274,7 +274,7 @@ def _execute(self, q): if self._batch: return self._batch.add_query(q) else: - result = execute(q, consistency_level=self._consistency, timeout=self._timeout) + result = connection.execute(q, consistency_level=self._consistency, timeout=self._timeout) if self._transaction: check_applied(result) return result @@ -1023,7 +1023,7 @@ class DMLQuery(object): _if_not_exists = False def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None, - if_not_exists=False, transaction=None, timeout=NOT_SET): + if_not_exists=False, transaction=None, timeout=connection.NOT_SET): self.model = model self.column_family_name = self.model.column_family_name() self.instance = instance @@ -1039,7 +1039,7 @@ def _execute(self, q): if self._batch: return self._batch.add_query(q) else: - tmp = execute(q, consistency_level=self._consistency, timeout=self._timeout) + tmp = connection.execute(q, consistency_level=self._consistency, timeout=self._timeout) if self._if_not_exists or self._transaction: check_applied(tmp) return tmp diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py new file mode 100644 index 0000000000..916f738814 --- /dev/null +++ b/cassandra/cqlengine/usertype.py @@ -0,0 +1,144 @@ +import re +import six + +from cassandra.cluster import 
UserTypeDoesNotExist +from cassandra.util import OrderedDict +from cassandra.cqlengine import CQLEngineException +from cassandra.cqlengine import columns +from cassandra.cqlengine import connection +from cassandra.cqlengine import models + + +class UserTypeException(CQLEngineException): + pass + + +class UserTypeDefinitionException(UserTypeException): + pass + + +class BaseUserType(object): + """ + The base type class; don't inherit from this, inherit from UserType, defined below + """ + __type_name__ = None + + _fields = None + _db_map = None + + def __init__(self, **values): + self._values = {} + + for name, field in self._fields.items(): + value = values.get(name, None) + if value is not None or isinstance(field, columns.BaseContainerColumn): + value = field.to_python(value) + value_mngr = field.value_manager(self, field, value) + if name in values: + value_mngr.explicit = True + self._values[name] = value_mngr + + def __eq__(self, other): + print self.__class__, other.__class__ + if self.__class__ != other.__class__: + return False + + keys = set(self._fields.keys()) + other_keys = set(other._fields.keys()) + if keys != other_keys: + return False + + for key in other_keys: + if getattr(self, key, None) != getattr(other, key, None): + return False + + return True + + def __ne__(self, other): + return not self.__eq__(other) + + @classmethod + def register_for_keyspace(cls, keyspace): + connection.register_udt(keyspace, cls.type_name(), cls) + + @classmethod + def type_name(cls): + """ + Returns the type name if it's been defined + otherwise, it creates it from the class name + """ + if cls.__type_name__: + type_name = cls.__type_name__.lower() + else: + camelcase = re.compile(r'([a-z])([A-Z])') + ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2)), s) + + type_name = ccase(cls.__name__) + # trim to less than 48 characters or cassandra will complain + type_name = type_name[-48:] + type_name = type_name.lower() + type_name = 
re.sub(r'^_+', '', type_name) + cls.__type_name__ = type_name + + return type_name + + def validate(self): + """ + Cleans and validates the field values + """ + pass + for name, field in self._fields.items(): + v = getattr(self, name) + if v is None and not self._values[name].explicit and field.has_default: + v = field.get_default() + val = field.validate(v) + setattr(self, name, val) + + +class UserTypeMetaClass(type): + + def __new__(cls, name, bases, attrs): + field_dict = OrderedDict() + + field_defs = [(k, v) for k, v in attrs.items() if isinstance(v, columns.Column)] + field_defs = sorted(field_defs, key=lambda x: x[1].position) + + # TODO: this plus more validation + #counter_columns = [c for c in defined_columns.values() if isinstance(c, columns.Counter)] + #if counter_columns and data_columns: + # raise ModelDefinitionException('counter models may not have data columns') + + def _transform_column(field_name, field_obj): + field_dict[field_name] = field_obj + field_obj.set_column_name(field_name) + attrs[field_name] = models.ColumnDescriptor(field_obj) + + # transform field definitions + for k, v in field_defs: + # don't allow a field with the same name as a built-in attribute or method + if k in BaseUserType.__dict__: + raise UserTypeDefinitionException("field '{}' conflicts with built-in attribute/method".format(k)) + _transform_column(k, v) + + # create db_name -> model name map for loading + db_map = {} + for field_name, field in field_dict.items(): + db_map[field.db_field_name] = field_name + + attrs['_fields'] = field_dict + attrs['_db_map'] = db_map + + klass = super(UserTypeMetaClass, cls).__new__(cls, name, bases, attrs) + + return klass + + +@six.add_metaclass(UserTypeMetaClass) +class UserType(BaseUserType): + + __type_name__ = None + """ + *Optional.* Sets the name of the CQL type for this type. + + If not specified, the type name will be the name of the class, with it's module name as it's prefix. 
+ """ diff --git a/docs/api/cassandra/cqlengine/management.rst b/docs/api/cassandra/cqlengine/management.rst index 1a00d8ad06..fb483abc81 100644 --- a/docs/api/cassandra/cqlengine/management.rst +++ b/docs/api/cassandra/cqlengine/management.rst @@ -13,5 +13,7 @@ A collection of functions for managing keyspace and table schema. .. autofunction:: sync_table +.. autofunction:: sync_type + .. autofunction:: drop_table diff --git a/docs/api/cassandra/cqlengine/usertype.rst b/docs/api/cassandra/cqlengine/usertype.rst new file mode 100644 index 0000000000..ebed187da9 --- /dev/null +++ b/docs/api/cassandra/cqlengine/usertype.rst @@ -0,0 +1,10 @@ +``cassandra.cqlengine.usertype`` - Model classes for User Defined Types +======================================================================= + +.. module:: cassandra.cqlengine.usertype + +UserType +-------- +.. autoclass:: UserType + + .. autoattribute:: __type_name__ diff --git a/docs/api/index.rst b/docs/api/index.rst index 7fffe71259..e0fe9810d2 100644 --- a/docs/api/index.rst +++ b/docs/api/index.rst @@ -37,3 +37,4 @@ Object Mapper cassandra/cqlengine/query cassandra/cqlengine/connection cassandra/cqlengine/management + cassandra/cqlengine/usertype diff --git a/docs/cqlengine/faq.rst b/docs/cqlengine/faq.rst index c9af1d106c..6342538de4 100644 --- a/docs/cqlengine/faq.rst +++ b/docs/cqlengine/faq.rst @@ -3,7 +3,7 @@ Frequently Asked Questions ========================== Q: Why don't updates work correctly on models instantiated as Model(field=value, field2=value2)? ----------------------------------------------------------------------------------------------- +------------------------------------------------------------------------------------------------ A: The recommended way to create new rows is with the models .create method. The values passed into a model's init method are interpreted by the model as the values as they were read from a row. 
This allows the model to "know" which rows have changed since the row was read out of cassandra, and create suitable update statements. From c7590c90622d2d2b2f74a620b57833c54ce54dce Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 3 Mar 2015 14:21:55 -0600 Subject: [PATCH 1242/3726] Remove superfluous test after member cleanup in ef72d2f --- .../cqlengine/query/test_queryset.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index e10290c4e8..8d8610b16b 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -546,23 +546,6 @@ def test_delete_without_any_where_args(self): TestModel.objects(attempt_id=0).delete() -class TestQuerySetConnectionHandling(BaseQuerySetUsage): - def test_conn_is_returned_after_filling_cache(self): - """ - Tests that the queryset returns it's connection after it's fetched all of it's results - """ - q = TestModel.objects(test_id=0) - #tuple of expected attempt_id, expected_result values - compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)]) - for t in q: - val = t.attempt_id, t.expected_result - assert val in compare_set - compare_set.remove(val) - - assert q._con is None - assert q._cur is None - - class TimeUUIDQueryModel(Model): partition = columns.UUID(primary_key=True) From b9d35045d806b59a09ab5adcc412675a1785b7e1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 3 Mar 2015 14:43:27 -0600 Subject: [PATCH 1243/3726] Don't reverse lists in prepend context. This rolls out workaround for a previous bug in Cassandra. https://issues.apache.org/jira/browse/CASSANDRA-8733 If you rely on list prepend and are using this version of the mapper with C* < [2.0.13|2.1.3], the prepended list will need to be reversed in the application context. 
--- cassandra/cqlengine/statements.py | 9 ++------- .../cqlengine/statements/test_assignment_clauses.py | 6 ++---- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 7c9f281e90..234150819f 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -291,10 +291,7 @@ def update_context(self, ctx): ctx[str(ctx_id)] = self._to_database(self._assignments) ctx_id += 1 if self._prepend is not None: - # CQL seems to prepend element at a time, starting - # with the element at idx 0, we can either reverse - # it here, or have it inserted in reverse - ctx[str(ctx_id)] = self._to_database(list(reversed(self._prepend))) + ctx[str(ctx_id)] = self._to_database(self._prepend) ctx_id += 1 if self._append is not None: ctx[str(ctx_id)] = self._to_database(self._append) @@ -308,9 +305,7 @@ def _analyze(self): self._append = self.value elif self._operation == "prepend": - # self.value is a Quoter but we reverse self._prepend later as if - # it's a list, so we have to set it to the underlying list - self._prepend = self.value.value + self._prepend = self.value elif self.previous is None: self._assignments = self.value diff --git a/tests/integration/cqlengine/statements/test_assignment_clauses.py b/tests/integration/cqlengine/statements/test_assignment_clauses.py index e322d1dbc8..76d526f225 100644 --- a/tests/integration/cqlengine/statements/test_assignment_clauses.py +++ b/tests/integration/cqlengine/statements/test_assignment_clauses.py @@ -226,8 +226,7 @@ def test_prepend(self): ctx = {} c.update_context(ctx) - # test context list reversal - self.assertEqual(ctx, {'0': [2, 1]}) + self.assertEqual(ctx, {'0': [1, 2]}) def test_append_and_prepend(self): c = ListUpdateClause('s', [1, 2, 3, 4, 5, 6], previous=[3, 4]) @@ -243,8 +242,7 @@ def test_append_and_prepend(self): ctx = {} c.update_context(ctx) - # test context list reversal - self.assertEqual(ctx, {'0': [2, 1], 
'1': [5, 6]}) + self.assertEqual(ctx, {'0': [1, 2], '1': [5, 6]}) def test_shrinking_list_update(self): """ tests that updating to a smaller list results in an insert statement """ From 20e0cd9025a5c069cf9b8eab6ddf978b93feed2a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 4 Mar 2015 12:10:44 -0600 Subject: [PATCH 1244/3726] Make integration tests cleanup cluster/sessions Fixes an issue where integration suites were timing out in later tests, due to eariler tests leaking resources. Root cause is PYTHON-135, which points out that cluster and session objects do not clean up properly when passing from scope. Until that is fixed, proper cleanup will avoid these leaks. --- tests/integration/long/test_consistency.py | 8 +++ tests/integration/long/test_large_data.py | 10 ++++ .../long/test_loadbalancingpolicies.py | 20 ++++++++ tests/integration/standard/test_cluster.py | 27 ++++++---- tests/integration/standard/test_concurrent.py | 15 ++++-- tests/integration/standard/test_metadata.py | 7 +++ tests/integration/standard/test_metrics.py | 7 +++ .../standard/test_prepared_statements.py | 19 ++++++- tests/integration/standard/test_query.py | 50 +++++++++---------- .../integration/standard/test_query_paging.py | 3 ++ .../standard/test_row_factories.py | 21 ++++---- tests/integration/standard/test_types.py | 2 +- tests/integration/standard/test_udts.py | 10 ++++ 13 files changed, 145 insertions(+), 54 deletions(-) diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index 7cb7d14a4c..79dd05ad7d 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -145,6 +145,8 @@ def _test_tokenaware_one_node_down(self, keyspace, rf, accepted): start(2) wait_for_up(cluster, 2) + cluster.shutdown() + def test_rfone_tokenaware_one_node_down(self): self._test_tokenaware_one_node_down( keyspace='test_rfone_tokenaware', @@ -187,6 +189,8 @@ def test_rfthree_tokenaware_none_down(self): 
SINGLE_DC_CONSISTENCY_LEVELS - set([ConsistencyLevel.ANY]), expected_reader=2) + cluster.shutdown() + def _test_downgrading_cl(self, keyspace, rf, accepted): cluster = Cluster( load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), @@ -216,6 +220,8 @@ def _test_downgrading_cl(self, keyspace, rf, accepted): start(2) wait_for_up(cluster, 2) + cluster.shutdown() + def test_rfone_downgradingcl(self): self._test_downgrading_cl( keyspace='test_rfone_downgradingcl', @@ -283,6 +289,8 @@ def rfthree_downgradingcl(self, cluster, keyspace, roundrobin): start(2) wait_for_up(cluster, 2) + session.cluster.shutdown() + # TODO: can't be done in this class since we reuse the ccm cluster # instead we should create these elsewhere # def test_rfthree_downgradingcl_twodcs(self): diff --git a/tests/integration/long/test_large_data.py b/tests/integration/long/test_large_data.py index 9fe1a6b6fe..36912845c1 100644 --- a/tests/integration/long/test_large_data.py +++ b/tests/integration/long/test_large_data.py @@ -100,6 +100,8 @@ def test_wide_rows(self): for i, row in enumerate(results): self.assertEqual(row['i'], i) + session.cluster.shutdown() + def test_wide_batch_rows(self): table = 'wide_batch_rows' session = self.make_session_and_keyspace() @@ -120,6 +122,8 @@ def test_wide_batch_rows(self): for i, row in enumerate(results): self.assertEqual(row['i'], i) + session.cluster.shutdown() + def test_wide_byte_rows(self): table = 'wide_byte_rows' session = self.make_session_and_keyspace() @@ -138,6 +142,8 @@ def test_wide_byte_rows(self): for row in results: self.assertEqual(row['v'], bb) + session.cluster.shutdown() + def test_large_text(self): table = 'large_text' session = self.make_session_and_keyspace() @@ -158,6 +164,8 @@ def test_large_text(self): for row in result: self.assertEqual(row['txt'], text) + session.cluster.shutdown() + def test_wide_table(self): table = 'wide_table' table_width = 330 @@ -184,3 +192,5 @@ def test_wide_table(self): for row in result: for i in 
range(table_width): self.assertEqual(row[create_column_name(i)], i) + + session.cluster.shutdown() diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 8bb9e3caa0..102cd76cb3 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -147,6 +147,8 @@ def test_roundrobin_two_dcs(self): self.coordinator_stats.assert_query_count_equals(self, 4, 3) self.coordinator_stats.assert_query_count_equals(self, 5, 3) + cluster.shutdown() + def test_roundrobin_two_dcs_2(self): use_multidc([2, 2]) keyspace = 'test_roundrobin_two_dcs_2' @@ -185,6 +187,8 @@ def test_roundrobin_two_dcs_2(self): self.coordinator_stats.assert_query_count_equals(self, 4, 3) self.coordinator_stats.assert_query_count_equals(self, 5, 3) + cluster.shutdown() + def test_dc_aware_roundrobin_two_dcs(self): use_multidc([3, 2]) keyspace = 'test_dc_aware_roundrobin_two_dcs' @@ -208,6 +212,8 @@ def test_dc_aware_roundrobin_two_dcs(self): self.coordinator_stats.assert_query_count_equals(self, 4, 0) self.coordinator_stats.assert_query_count_equals(self, 5, 0) + cluster.shutdown() + def test_dc_aware_roundrobin_two_dcs_2(self): use_multidc([3, 2]) keyspace = 'test_dc_aware_roundrobin_two_dcs_2' @@ -231,6 +237,8 @@ def test_dc_aware_roundrobin_two_dcs_2(self): self.coordinator_stats.assert_query_count_equals(self, 4, 6) self.coordinator_stats.assert_query_count_equals(self, 5, 6) + cluster.shutdown() + def test_dc_aware_roundrobin_one_remote_host(self): use_multidc([2, 2]) keyspace = 'test_dc_aware_roundrobin_one_remote_host' @@ -314,6 +322,8 @@ def test_dc_aware_roundrobin_one_remote_host(self): except NoHostAvailable: pass + cluster.shutdown() + def test_token_aware(self): keyspace = 'test_token_aware' self.token_aware(keyspace) @@ -394,6 +404,8 @@ def token_aware(self, keyspace, use_prepared=False): self.assertEqual(results, set([0, 12])) 
self.coordinator_stats.assert_query_count_equals(self, 2, 0) + cluster.shutdown() + def test_token_aware_composite_key(self): use_singledc() keyspace = 'test_token_aware_composite_key' @@ -423,6 +435,8 @@ def test_token_aware_composite_key(self): self.assertTrue(len(results) == 1) self.assertTrue(results[0].i) + cluster.shutdown() + def test_token_aware_with_rf_2(self, use_prepared=False): use_singledc() keyspace = 'test_token_aware_with_rf_2' @@ -452,6 +466,8 @@ def test_token_aware_with_rf_2(self, use_prepared=False): self.coordinator_stats.assert_query_count_equals(self, 2, 0) self.coordinator_stats.assert_query_count_equals(self, 3, 12) + cluster.shutdown() + def test_token_aware_with_local_table(self): use_singledc() cluster = Cluster( @@ -465,6 +481,8 @@ def test_token_aware_with_local_table(self): self.assertEqual(len(r), 1) self.assertEqual(r[0].key, 'local') + cluster.shutdown() + def test_white_list(self): use_singledc() keyspace = 'test_white_list' @@ -499,3 +517,5 @@ def test_white_list(self): self.fail() except NoHostAvailable: pass + + cluster.shutdown() diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 95e6082b2f..f2d55b6b31 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -98,12 +98,14 @@ def test_connect_on_keyspace(self): session2 = cluster.connect('test3rf') result2 = session2.execute("SELECT * FROM test") self.assertEqual(result, result2) + cluster.shutdown() def test_set_keyspace_twice(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() session.execute("USE system") session.execute("USE system") + cluster.shutdown() def test_default_connections(self): """ @@ -122,7 +124,6 @@ def test_connect_to_already_shutdown_cluster(self): """ Ensure you cannot connect to a cluster that's been shutdown """ - cluster = Cluster(protocol_version=PROTOCOL_VERSION) cluster.shutdown() self.assertRaises(Exception, 
cluster.connect) @@ -213,6 +214,8 @@ def test_submit_schema_refresh(self): self.assertIn("newkeyspace", cluster.metadata.keyspaces) session.execute("DROP KEYSPACE newkeyspace") + cluster.shutdown() + other_cluster.shutdown() def test_refresh_schema(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) @@ -224,7 +227,7 @@ def test_refresh_schema(self): self.assertIsNot(original_meta, cluster.metadata.keyspaces) self.assertEqual(original_meta, cluster.metadata.keyspaces) - session.shutdown() + cluster.shutdown() def test_refresh_schema_keyspace(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) @@ -240,7 +243,7 @@ def test_refresh_schema_keyspace(self): current_system_meta = current_meta['system'] self.assertIsNot(original_system_meta, current_system_meta) self.assertEqual(original_system_meta.as_cql_query(), current_system_meta.as_cql_query()) - session.shutdown() + cluster.shutdown() def test_refresh_schema_table(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) @@ -259,7 +262,7 @@ def test_refresh_schema_table(self): self.assertIs(original_system_meta, current_system_meta) self.assertIsNot(original_system_schema_meta, current_system_schema_meta) self.assertEqual(original_system_schema_meta.as_cql_query(), current_system_schema_meta.as_cql_query()) - session.shutdown() + cluster.shutdown() def test_refresh_schema_type(self): if get_server_versions()[0] < (2, 1, 0): @@ -331,7 +334,7 @@ def test_refresh_schema_no_wait(self): self.assertIsNot(original_meta, c.metadata.keyspaces) self.assertEqual(original_meta, c.metadata.keyspaces) - s.shutdown() + c.shutdown() refresh_threshold = 0.5 # cluster agreement bypass @@ -359,11 +362,11 @@ def test_refresh_schema_no_wait(self): self.assertGreaterEqual(end_time - start_time, agreement_timeout) self.assertIs(original_meta, c.metadata.keyspaces) - s.shutdown() + c.shutdown() finally: session.execute("UPDATE system.local SET schema_version=%s WHERE key='local'", (schema_ver,)) - session.shutdown() + 
cluster.shutdown() def test_trace(self): """ @@ -406,6 +409,7 @@ def check_trace(trace): future = session.execute_async(prepared, parameters=(), trace=True) future.result() check_trace(future.get_query_trace()) + cluster.shutdown() def test_trace_timeout(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) @@ -416,6 +420,7 @@ def test_trace_timeout(self): future = session.execute_async(statement, trace=True) future.result() self.assertRaises(TraceUnavailable, future.get_query_trace, -1.0) + cluster.shutdown() def test_string_coverage(self): """ @@ -434,6 +439,7 @@ def test_string_coverage(self): self.assertIn(query, str(future)) self.assertIn('result', str(future)) + cluster.shutdown() def test_idle_heartbeat(self): interval = 1 @@ -492,7 +498,7 @@ def test_idle_heartbeat(self): cluster._idle_heartbeat.join() assert_quiescent_pool_state(self, cluster) - session.shutdown() + cluster.shutdown() @patch('cassandra.cluster.Cluster.idle_heartbeat_interval', new=0.1) def test_idle_heartbeat_disabled(self): @@ -511,7 +517,7 @@ def test_idle_heartbeat_disabled(self): # assert not idle status (should never get reset because there is not heartbeat) self.assertFalse(any(c.is_idle for c in connections)) - session.shutdown() + cluster.shutdown() def test_pool_management(self): # Ensure that in_flight and request_ids quiesce after cluster operations @@ -544,5 +550,4 @@ def test_pool_management(self): assert_quiescent_pool_state(self, cluster) - session2.shutdown() - session.shutdown() + cluster.shutdown() diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index 211c44206d..5ca5b405e5 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -35,12 +35,17 @@ def setup_module(): class ClusterTests(unittest.TestCase): - def setUp(self): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + @classmethod + def setUpClass(cls): + cls.cluster = 
Cluster(protocol_version=PROTOCOL_VERSION) if PROTOCOL_VERSION < 3: - self.cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) - self.session = self.cluster.connect() - self.session.row_factory = tuple_factory + cls.cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) + cls.session = cls.cluster.connect() + cls.session.row_factory = tuple_factory + + @classmethod + def tearDownClass(cls): + cls.cluster.shutdown() def test_execute_concurrent(self): for num_statements in (0, 1, 2, 7, 10, 99, 100, 101, 199, 200, 201): diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 834887d533..71af7c8f25 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -367,6 +367,7 @@ def test_export_keyspace_schema(self): keyspace_metadata = cluster.metadata.keyspaces[keyspace] self.assertIsInstance(keyspace_metadata.export_as_string(), six.string_types) self.assertIsInstance(keyspace_metadata.as_cql_query(), six.string_types) + cluster.shutdown() def assert_equal_diff(self, received, expected): if received != expected: @@ -478,6 +479,8 @@ def test_export_keyspace_schema_udts(self): self.assert_equal_diff(table_meta.export_as_string(), expected_string) + cluster.shutdown() + def test_case_sensitivity(self): """ Test that names that need to be escaped in CREATE statements are @@ -516,6 +519,7 @@ def test_case_sensitivity(self): self.assertIn('PRIMARY KEY (k, "A")', schema) self.assertIn('WITH CLUSTERING ORDER BY ("A" DESC)', schema) self.assertIn('CREATE INDEX myindex ON "AnInterestingKeyspace"."AnInterestingTable" ("MyColumn")', schema) + cluster.shutdown() def test_already_exists_exceptions(self): """ @@ -538,6 +542,7 @@ def test_already_exists_exceptions(self): k int PRIMARY KEY, v int )''' self.assertRaises(AlreadyExists, session.execute, ddl % (ksname, cfname)) + cluster.shutdown() def test_replicas(self): """ @@ -555,6 +560,7 @@ def test_replicas(self): host 
= list(cluster.metadata.get_replicas('test3rf', 'key'))[0] self.assertEqual(host.datacenter, 'dc1') self.assertEqual(host.rack, 'r1') + cluster.shutdown() def test_token_map(self): """ @@ -574,6 +580,7 @@ def test_token_map(self): self.assertEqual(set(get_replicas('test3rf', token)), set(owners)) self.assertEqual(set(get_replicas('test2rf', token)), set([owners[(i + 1) % 3], owners[(i + 2) % 3]])) self.assertEqual(set(get_replicas('test1rf', token)), set([owners[(i + 1) % 3]])) + cluster.shutdown() def test_legacy_tables(self): diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 828ed6000c..e6c6551292 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -58,6 +58,7 @@ def test_connection_error(self): get_cluster().start(wait_for_binary_proto=True, wait_other_notice=True) self.assertGreater(cluster.metrics.stats.connection_errors, 0) + cluster.shutdown() def test_write_timeout(self): """ @@ -90,6 +91,8 @@ def test_write_timeout(self): finally: get_node(1).start(wait_other_notice=True, wait_for_binary_proto=True) + cluster.shutdown() + def test_read_timeout(self): """ Trigger and ensure read_timeouts are counted @@ -121,6 +124,8 @@ def test_read_timeout(self): finally: get_node(1).start(wait_other_notice=True, wait_for_binary_proto=True) + cluster.shutdown() + def test_unavailable(self): """ Trigger and ensure unavailables are counted @@ -157,6 +162,8 @@ def test_unavailable(self): finally: get_node(1).start(wait_other_notice=True, wait_for_binary_proto=True) + cluster.shutdown() + def test_other_error(self): # TODO: Bootstrapping or Overloaded cases pass diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index e91e0edefb..5b4f4569e6 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -28,7 +28,6 @@ def 
setup_module(): use_singledc() - class PreparedStatementTests(unittest.TestCase): def test_basic(self): @@ -101,6 +100,8 @@ def test_basic(self): results = session.execute(bound) self.assertEqual(results, [('x', 'y', 'z')]) + cluster.shutdown() + def test_missing_primary_key(self): """ Ensure an InvalidRequest is thrown @@ -119,6 +120,8 @@ def test_missing_primary_key(self): bound = prepared.bind((1,)) self.assertRaises(InvalidRequest, session.execute, bound) + cluster.shutdown() + def test_missing_primary_key_dicts(self): """ Ensure an InvalidRequest is thrown @@ -138,6 +141,8 @@ def test_missing_primary_key_dicts(self): bound = prepared.bind({'v': 1}) self.assertRaises(InvalidRequest, session.execute, bound) + cluster.shutdown() + def test_too_many_bind_values(self): """ Ensure a ValueError is thrown when attempting to bind too many variables @@ -154,6 +159,8 @@ def test_too_many_bind_values(self): self.assertIsInstance(prepared, PreparedStatement) self.assertRaises(ValueError, prepared.bind, (1, 2)) + cluster.shutdown() + def test_too_many_bind_values_dicts(self): """ Ensure a ValueError is thrown when attempting to bind too many variables @@ -175,6 +182,8 @@ def test_too_many_bind_values_dicts(self): self.assertIsInstance(prepared, PreparedStatement) self.assertRaises(KeyError, prepared.bind, {}) + cluster.shutdown() + def test_none_values(self): """ Ensure binding None is handled correctly @@ -202,6 +211,8 @@ def test_none_values(self): results = session.execute(bound) self.assertEqual(results[0].v, None) + cluster.shutdown() + def test_none_values_dicts(self): """ Ensure binding None is handled correctly with dict bindings @@ -230,6 +241,8 @@ def test_none_values_dicts(self): results = session.execute(bound) self.assertEqual(results[0].v, None) + cluster.shutdown() + def test_async_binding(self): """ Ensure None binding over async queries @@ -257,6 +270,8 @@ def test_async_binding(self): results = future.result() self.assertEqual(results[0].v, None) + 
cluster.shutdown() + def test_async_binding_dicts(self): """ Ensure None binding over async queries with dict bindings @@ -283,3 +298,5 @@ def test_async_binding_dicts(self): future = session.execute_async(prepared, {'k': 873}) results = future.result() self.assertEqual(results[0].v, None) + + cluster.shutdown() diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index c0db307555..ba5b7de51f 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -52,6 +52,8 @@ def test_query(self): session.execute(bound) self.assertEqual(bound.routing_key, b'\x00\x00\x00\x01') + cluster.shutdown() + def test_trace_prints_okay(self): """ Code coverage to ensure trace prints to string without error @@ -69,6 +71,8 @@ def test_trace_prints_okay(self): for event in statement.trace.events: str(event) + cluster.shutdown() + def test_trace_ignores_row_factory(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() @@ -83,18 +87,23 @@ def test_trace_ignores_row_factory(self): for event in statement.trace.events: str(event) + cluster.shutdown() + class PreparedStatementTests(unittest.TestCase): + def setUp(self): + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + + def tearDown(self): + self.cluster.shutdown() + def test_routing_key(self): """ Simple code coverage to ensure routing_keys can be accessed """ - - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - - prepared = session.prepare( + prepared = self.session.prepare( """ INSERT INTO test3rf.test (k, v) VALUES (?, ?) 
""") @@ -108,11 +117,7 @@ def test_empty_routing_key_indexes(self): Ensure when routing_key_indexes are blank, the routing key should be None """ - - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - - prepared = session.prepare( + prepared = self.session.prepare( """ INSERT INTO test3rf.test (k, v) VALUES (?, ?) """) @@ -127,11 +132,7 @@ def test_predefined_routing_key(self): Basic test that ensures _set_routing_key() overrides the current routing key """ - - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - - prepared = session.prepare( + prepared = self.session.prepare( """ INSERT INTO test3rf.test (k, v) VALUES (?, ?) """) @@ -145,11 +146,7 @@ def test_multiple_routing_key_indexes(self): """ Basic test that uses a fake routing_key_index """ - - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - - prepared = session.prepare( + prepared = self.session.prepare( """ INSERT INTO test3rf.test (k, v) VALUES (?, ?) """) @@ -167,11 +164,7 @@ def test_bound_keyspace(self): """ Ensure that bound.keyspace works as expected """ - - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - - prepared = session.prepare( + prepared = self.session.prepare( """ INSERT INTO test3rf.test (k, v) VALUES (?, ?) 
""") @@ -213,6 +206,8 @@ def test_prepared_statement(self): self.assertEqual(str(bound), '') + cluster.shutdown() + class BatchStatementTests(unittest.TestCase): @@ -322,6 +317,9 @@ def setUp(self): self.cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) self.session = self.cluster.connect() + def tearDown(self): + self.cluster.shutdown() + def test_conditional_update(self): self.session.execute("INSERT INTO test3rf.test (k, v) VALUES (0, 0)") statement = SimpleStatement( @@ -459,8 +457,6 @@ def test_rk_from_simple(self): """ batch routing key is inherited from SimpleStatement """ - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - self.session = self.cluster.connect() batch = BatchStatement() batch.add(self.simple_statement) self.assertIsNotNone(batch.routing_key) diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index cc3dea6bc1..e48a4aae5e 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -50,6 +50,9 @@ def setUp(self): self.session = self.cluster.connect() self.session.execute("TRUNCATE test3rf.test") + def tearDown(self): + self.cluster.shutdown() + def test_paging(self): statements_and_params = zip(cycle(["INSERT INTO test3rf.test (k, v) VALUES (%s, 0)"]), [(i, ) for i in range(100)]) diff --git a/tests/integration/standard/test_row_factories.py b/tests/integration/standard/test_row_factories.py index 354021f257..ba472e52c3 100644 --- a/tests/integration/standard/test_row_factories.py +++ b/tests/integration/standard/test_row_factories.py @@ -33,6 +33,13 @@ class RowFactoryTests(unittest.TestCase): Test different row_factories and access code """ + def setUp(self): + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + + def tearDown(self): + self.cluster.shutdown() + truncate = ''' TRUNCATE test3rf.test ''' @@ -56,8 +63,7 @@ class RowFactoryTests(unittest.TestCase): 
''' def test_tuple_factory(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() + session = self.session session.row_factory = tuple_factory session.execute(self.truncate) @@ -77,9 +83,8 @@ def test_tuple_factory(self): self.assertEqual(result[1][0], result[1][1]) self.assertEqual(result[1][0], 2) - def test_named_tuple_factoryy(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() + def test_named_tuple_factory(self): + session = self.session session.row_factory = named_tuple_factory session.execute(self.truncate) @@ -99,8 +104,7 @@ def test_named_tuple_factoryy(self): self.assertEqual(result[1].k, 2) def test_dict_factory(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() + session = self.session session.row_factory = dict_factory session.execute(self.truncate) @@ -121,8 +125,7 @@ def test_dict_factory(self): self.assertEqual(result[1]['k'], 2) def test_ordered_dict_factory(self): - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() + session = self.session session.row_factory = ordered_dict_factory session.execute(self.truncate) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index c30b94c740..d4ef94c2d4 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -81,7 +81,7 @@ def setup_class(cls): @classmethod def teardown_class(cls): cls._session.execute("DROP KEYSPACE typetests") - cls._session.shutdown() + cls._session.cluster.shutdown() def test_blob_type_as_string(self): s = self._session diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 87e76b3559..d6b74f3ad6 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -316,6 +316,8 @@ def test_udt_sizes(self): result = s.execute("SELECT v FROM mytable WHERE k=0", 
timeout=EXTENDED_QUERY_TIMEOUT)[0] self.assertEqual(created_udt, result.v) + c.shutdown() + def nested_udt_helper(self, udts, i): """ Helper for creating nested udts. @@ -389,6 +391,8 @@ def test_nested_registered_udts(self): result = s.execute("SELECT v_%s FROM mytable WHERE k=0", (i,))[0] self.assertEqual(udt, result['v_%s' % i]) + c.shutdown() + def test_nested_unregistered_udts(self): """ Test for ensuring nested unregistered udts are handled correctly. @@ -450,6 +454,8 @@ def test_nested_unregistered_udts(self): result = s.execute("SELECT v_%s FROM mytable WHERE k=0", (i,))[0] self.assertEqual(udt, result['v_%s' % i]) + c.shutdown() + def test_nested_registered_udts_with_different_namedtuples(self): """ Test for ensuring nested udts are handled correctly when the @@ -517,6 +523,8 @@ def test_nested_registered_udts_with_different_namedtuples(self): result = s.execute("SELECT v_%s FROM mytable WHERE k=0", (i,))[0] self.assertEqual(udt, result['v_%s' % i]) + c.shutdown() + def test_non_existing_types(self): c = Cluster(protocol_version=PROTOCOL_VERSION) c.connect() @@ -524,6 +532,8 @@ def test_non_existing_types(self): self.assertRaises(UserTypeDoesNotExist, c.register_user_type, "some_bad_keyspace", "user", User) self.assertRaises(UserTypeDoesNotExist, c.register_user_type, "system", "user", User) + c.shutdown() + def test_primitive_datatypes(self): """ Test for inserting various types of DATA_TYPE_PRIMITIVES into UDT's From 9e2e256ba34eae6d6c766cd39941030f0e0275e0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 5 Mar 2015 17:01:46 -0600 Subject: [PATCH 1245/3726] Use cass type for key serlz when materializing map from DB Addresses issue where key types with strings were not comparing in an 'encoding-agnostic' way (PYTHON-321). 
--- cassandra/cqltypes.py | 19 ++++++++++--------- cassandra/util.py | 28 ++++++++++++++++++++++++---- 2 files changed, 34 insertions(+), 13 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 0c8648d4f7..8b5a90872d 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -780,12 +780,12 @@ class MapType(_ParameterizedType): @classmethod def validate(cls, val): - subkeytype, subvaltype = cls.subtypes - return dict((subkeytype.validate(k), subvaltype.validate(v)) for (k, v) in six.iteritems(val)) + key_type, subvaltype = cls.subtypes + return dict((key_type.validate(k), subvaltype.validate(v)) for (k, v) in six.iteritems(val)) @classmethod def deserialize_safe(cls, byts, protocol_version): - subkeytype, subvaltype = cls.subtypes + key_type, value_type = cls.subtypes if protocol_version >= 3: unpack = int32_unpack length = 4 @@ -795,6 +795,7 @@ def deserialize_safe(cls, byts, protocol_version): numelements = unpack(byts[:length]) p = length themap = OrderedMap() + themap._set_key_type(key_type, protocol_version) for _ in range(numelements): key_len = unpack(byts[p:p + length]) p += length @@ -804,14 +805,14 @@ def deserialize_safe(cls, byts, protocol_version): p += length valbytes = byts[p:p + val_len] p += val_len - key = subkeytype.from_binary(keybytes, protocol_version) - val = subvaltype.from_binary(valbytes, protocol_version) - themap._insert(key, val) + key = key_type.from_binary(keybytes, protocol_version) + val = value_type.from_binary(valbytes, protocol_version) + themap._insert_unchecked(key, keybytes, val) return themap @classmethod def serialize_safe(cls, themap, protocol_version): - subkeytype, subvaltype = cls.subtypes + key_type, value_type = cls.subtypes pack = int32_pack if protocol_version >= 3 else uint16_pack buf = io.BytesIO() buf.write(pack(len(themap))) @@ -820,8 +821,8 @@ def serialize_safe(cls, themap, protocol_version): except AttributeError: raise TypeError("Got a non-map object for a map value") for key, 
val in items: - keybytes = subkeytype.to_binary(key, protocol_version) - valbytes = subvaltype.to_binary(val, protocol_version) + keybytes = key_type.to_binary(key, protocol_version) + valbytes = value_type.to_binary(val, protocol_version) buf.write(pack(len(keybytes))) buf.write(keybytes) buf.write(pack(len(valbytes))) diff --git a/cassandra/util.py b/cassandra/util.py index 14bc09cb79..e1f158ad4c 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1,4 +1,5 @@ from __future__ import with_statement +import six try: from collections import OrderedDict @@ -556,8 +557,8 @@ def _intersect(self, other): isect.add(item) return isect + from collections import Mapping -import six from six.moves import cPickle @@ -592,6 +593,14 @@ class OrderedMap(Mapping): or higher. ''' + + cass_key_type = None + protocol_version = None + ''' + Set when deserializing a map from the server, when the key type is known. + This avoids re-serializing, and also helps normalize lookups on text types. + ''' + def __init__(self, *args, **kwargs): if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) @@ -619,6 +628,18 @@ def _insert(self, key, value): self._items.append((key, value)) self._index[flat_key] = len(self._items) - 1 + def _set_key_type(self, cass_type, protocol_version): + self.cass_key_type = cass_type + self.protocol_version = protocol_version + + def _insert_unchecked(self, key, flat_key, value): + ''' + Used when building from server response. 
+ cass_key_type must be set in order for lookups to work later + ''' + self._items.append((key, value)) + self._index[flat_key] = len(self._items) - 1 + def __getitem__(self, key): try: index = self._index[self._serialize_key(key)] @@ -653,9 +674,8 @@ def __repr__(self): def __str__(self): return '{%s}' % ', '.join("%s: %s" % (k, v) for k, v in self._items) - @staticmethod - def _serialize_key(key): - return cPickle.dumps(key) + def _serialize_key(self, key): + return self.cass_key_type.serialize(key, self.protocol_version) if self.cass_key_type else cPickle.dumps(key) import datetime From 2dba082da6f728c50ffc2b29fa4f2695495118cc Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 6 Mar 2015 08:48:34 -0600 Subject: [PATCH 1246/3726] Make unit tests work with latest version of pypy --- tests/unit/test_policies.py | 7 ++++--- tests/unit/test_types.py | 3 ++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index bd8426cd50..ba389c7662 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -140,20 +140,21 @@ def host_down(): # make the GIL switch after every instruction, maximizing # the chace of race conditions - if six.PY2: + check = six.PY2 or '__pypy__' in sys.builtin_module_names + if check: original_interval = sys.getcheckinterval() else: original_interval = sys.getswitchinterval() try: - if six.PY2: + if check: sys.setcheckinterval(0) else: sys.setswitchinterval(0.0001) map(lambda t: t.start(), threads) map(lambda t: t.join(), threads) finally: - if six.PY2: + if check: sys.setcheckinterval(original_interval) else: sys.setswitchinterval(original_interval) diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index b5b70b2f62..0e8125373e 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -20,6 +20,7 @@ import calendar import datetime import tempfile +import six import time import cassandra @@ -228,7 +229,7 @@ def __init__(self, 
subtypes, names): @classmethod def apply_parameters(cls, subtypes, names): - return cls(subtypes, [unhexlify(name) if name is not None else name for name in names]) + return cls(subtypes, [unhexlify(six.b(name)) if name is not None else name for name in names]) class BarType(FooType): typename = 'org.apache.cassandra.db.marshal.BarType' From 8d40400beb1065d63c8ba846737065ea39cfc942 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 9 Mar 2015 11:38:19 -0500 Subject: [PATCH 1247/3726] Collect time functions in util, add uuid_from_time and min/max functions PYTHON-99 Conflicts: cassandra/cqltypes.py cassandra/util.py --- cassandra/cqltypes.py | 40 ++++++++------ cassandra/util.py | 125 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 146 insertions(+), 19 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 0c8648d4f7..97f8504db1 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -34,24 +34,27 @@ import datetime from decimal import Decimal import io +import logging import re import socket import time +import six +from six.moves import range import sys from uuid import UUID +import warnings -import six -from six.moves import range from cassandra.marshal import (int8_pack, int8_unpack, uint16_pack, uint16_unpack, uint32_pack, uint32_unpack, int32_pack, int32_unpack, int64_pack, int64_unpack, float_pack, float_unpack, double_pack, double_unpack, varint_pack, varint_unpack) -from cassandra.util import OrderedMap, sortedset, Time +from cassandra import util apache_cassandra_type_prefix = 'org.apache.cassandra.db.marshal.' +log = logging.getLogger(__name__) if six.PY3: _number_types = frozenset((int, float)) @@ -72,16 +75,17 @@ def trim_if_startswith(s, prefix): def unix_time_from_uuid1(u): - return (u.time - 0x01B21DD213814000) / 10000000.0 - - -DATETIME_EPOC = datetime.datetime(1970, 1, 1) + msg = "'cassandra.cqltypes.unix_time_from_uuid1' has moved to 'cassandra.util'. 
This entry point will be removed in the next major version." + warnings.warn(msg, DeprecationWarning) + log.warn(msg) + return util.unix_time_from_uuid1(u) def datetime_from_timestamp(timestamp): - # PYTHON-119: workaround for Windows - dt = DATETIME_EPOC + datetime.timedelta(seconds=timestamp) - return dt + msg = "'cassandra.cqltypes.datetime_from_timestamp' has moved to 'cassandra.util'. This entry point will be removed in the next major version." + warnings.warn(msg, DeprecationWarning) + log.warn(msg) + return util.datetime_from_timestamp(timestamp) _casstypes = {} @@ -581,7 +585,7 @@ def my_timestamp(self): @staticmethod def deserialize(byts, protocol_version): timestamp = int64_unpack(byts) / 1000.0 - return datetime_from_timestamp(timestamp) + return util.datetime_from_timestamp(timestamp) @staticmethod def serialize(v, protocol_version): @@ -652,7 +656,7 @@ def serialize(val, protocol_version): @staticmethod def deserialize(byts, protocol_version): timestamp = SimpleDateType.seconds_per_day * (uint32_unpack(byts) - 2 ** 31) - dt = datetime_from_timestamp(timestamp) + dt = util.datetime_from_timestamp(timestamp) return datetime.date(dt.year, dt.month, dt.day) @@ -661,8 +665,8 @@ class TimeType(_CassandraType): @classmethod def validate(cls, val): - if not isinstance(val, Time): - val = Time(val) + if not isinstance(val, util.Time): + val = util.Time(val) return val @staticmethod @@ -670,12 +674,12 @@ def serialize(val, protocol_version): try: nano = val.nanosecond_time except AttributeError: - nano = Time(val).nanosecond_time + nano = util.Time(val).nanosecond_time return int64_pack(nano) @staticmethod def deserialize(byts, protocol_version): - return Time(int64_unpack(byts)) + return util.Time(int64_unpack(byts)) class UTF8Type(_CassandraType): @@ -771,7 +775,7 @@ class ListType(_SimpleParameterizedType): class SetType(_SimpleParameterizedType): typename = 'set' num_subtypes = 1 - adapter = sortedset + adapter = util.sortedset class 
MapType(_ParameterizedType): @@ -794,7 +798,7 @@ def deserialize_safe(cls, byts, protocol_version): length = 2 numelements = unpack(byts[:length]) p = length - themap = OrderedMap() + themap = util.OrderedMap() for _ in range(numelements): key_len = unpack(byts[p:p + length]) p += length diff --git a/cassandra/util.py b/cassandra/util.py index 14bc09cb79..c53bcb802b 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1,4 +1,128 @@ from __future__ import with_statement +import calendar +import datetime +import random +import six +import uuid + +DATETIME_EPOC = datetime.datetime(1970, 1, 1) + + +def datetime_from_timestamp(timestamp): + """ + Creates a timezone-agnostic datetime from timestamp (in seconds) in a consistent manner. + Works around a Windows issue with large negative timestamps (PYTHON-119), + and rounding differences in Python 3.4 (PYTHON-340). + + :param timestamp: a unix timestamp, in seconds + + :rtype: datetime + """ + dt = DATETIME_EPOC + datetime.timedelta(seconds=timestamp) + return dt + + +def unix_time_from_uuid1(uuid_arg): + """ + Converts a version 1 :class:`uuid.UUID` to a timestamp with the same precision + as :meth:`time.time()` returns. This is useful for examining the + results of queries returning a v1 :class:`~uuid.UUID`. + + :param uuid_arg: a version 1 :class:`~uuid.UUID` + + :rtype: timestamp + + """ + return (uuid_arg.time - 0x01B21DD213814000) / 10000000.0 + + +def datetime_from_uuid1(uuid_arg): + """ + Creates a timezone-agnostic datetime from the timestamp in the + specified type-1 UUID. + + :param uuid_arg: a version 1 :class:`~uuid.UUID` + + :rtype: timestamp + + """ + return datetime_from_timestamp(unix_time_from_uuid1(uuid_arg)) + + +def min_uuid_from_time(timestamp): + """ + Generates the minimum TimeUUID (type 1) for a given timestamp, as compared by Cassandra. + + See :func:`uuid_from_time` for argument and return types. 
+ """ + return uuid_from_time(timestamp, 0x80, 0x808080808080) # Cassandra does byte-wise comparison; fill with min signed bytes (0x80 = -128) + + +def max_uuid_from_time(timestamp): + """ + Generates the maximum TimeUUID (type 1) for a given timestamp, as compared by Cassandra. + + See :func:`uuid_from_time` for argument and return types. + """ + return uuid_from_time(timestamp, 0x3f7f, 0x7f7f7f7f7f7f) # Max signed bytes (0x7f = 127) + + +def uuid_from_time(time_arg, clock_seq=None, node=None): + """ + Converts a datetime or timestamp to a type 1 :class:`uuid.UUID`. + + :param time_arg: + The time to use for the timestamp portion of the UUID. + This can either be a :class:`datetime` object or a timestamp + in seconds (as returned from :meth:`time.time()`). + :type datetime: :class:`datetime` or timestamp + + :param clock_seq: + Clock sequence field for the UUID (up to 14 bits). If not specified, + a random sequence is generated. + :type clock_seq: int + + :param node: + None integer for the UUID (up to 48 bits). If not specified, this + field is randomized. + :type node: long + + :rtype: :class:`uuid.UUID` + + """ + if hasattr(time_arg, 'utctimetuple'): + seconds = int(calendar.timegm(time_arg.utctimetuple())) + microseconds = (seconds * 1e6) + time_arg.time().microsecond + else: + microseconds = int(time_arg * 1e6) + + # 0x01b21dd213814000 is the number of 100-ns intervals between the + # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00. 
+ intervals = int(microseconds * 10) + 0x01b21dd213814000 + + time_low = intervals & 0xffffffff + time_mid = (intervals >> 32) & 0xffff + time_hi_version = (intervals >> 48) & 0x0fff + + if clock_seq is None: + clock_seq = random.getrandbits(14) + + clock_seq_low = clock_seq & 0xff + clock_seq_hi_variant = 0x80 | ((clock_seq >> 8) & 0x3f) + + if node is None: + node = random.getrandbits(48) + node &= 0xffffffffff + + return uuid.UUID(fields=(time_low, time_mid, time_hi_version, + clock_seq_hi_variant, clock_seq_low, node), version=1) + +LOWEST_TIME_UUID = uuid.UUID('00000000-0000-1000-8080-808080808080') +""" The lowest possible TimeUUID, as sorted by Cassandra. """ + +HIGHEST_TIME_UUID = uuid.UUID('ffffffff-ffff-1fff-bf7f-7f7f7f7f7f7f') +""" The highest possible TimeUUID, as sorted by Cassandra. """ + try: from collections import OrderedDict @@ -557,7 +681,6 @@ def _intersect(self, other): return isect from collections import Mapping -import six from six.moves import cPickle From d2f5335fdf7e0d5e43b3b960f921299ddfe4068a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 9 Mar 2015 11:58:19 -0500 Subject: [PATCH 1248/3726] cassandra.util doc update to include time functions --- docs/api/cassandra/util.rst | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/docs/api/cassandra/util.rst b/docs/api/cassandra/util.rst index 2e79758dea..848d4d5fc2 100644 --- a/docs/api/cassandra/util.rst +++ b/docs/api/cassandra/util.rst @@ -1,7 +1,5 @@ ``cassandra.util`` - Utilities =================================== -.. module:: cassandra.util - -.. autoclass:: OrderedMap - :members: +.. 
automodule:: cassandra.util + :members: From 2d5466f8a2cb384ec96abc1720c8418faa542cd3 Mon Sep 17 00:00:00 2001 From: Tyler Hobbs Date: Mon, 9 Mar 2015 16:58:58 -0500 Subject: [PATCH 1249/3726] Add Read and WriteFailure error classes --- cassandra/protocol.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index ca628a433d..8b22f75d97 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -263,6 +263,35 @@ def to_exception(self): return ReadTimeout(self.summary_msg(), **self.info) +class ReadFailureMessage(RequestExecutionException): + summary = "Replica(s) failed to execute read" + error_code = 0x1300 + + @staticmethod + def recv_error_info(f): + return { + 'consistency': read_consistency_level(f), + 'received_responses': read_int(f), + 'required_responses': read_int(f), + 'failures': read_int(f), + 'data_retrieved': bool(read_byte(f)), + } + + +class WriteFailureMessage(RequestExecutionException): + summary = "Replica(s) failed to execute write" + error_code = 0x1500 + + @staticmethod + def recv_error_info(f): + return { + 'consistency': read_consistency_level(f), + 'received_responses': read_int(f), + 'required_responses': read_int(f), + 'failures': read_int(f), + 'write_type': WriteType.name_to_value[read_string(f)], + } + class SyntaxException(RequestValidationException): summary = 'Syntax error in CQL query' error_code = 0x2000 From 56b8a1463c61e8f0859941b7ae62a37a55470a2d Mon Sep 17 00:00:00 2001 From: Stefania Alborghetti Date: Tue, 10 Mar 2015 11:56:13 +0800 Subject: [PATCH 1250/3726] Added WriteFailure and ReadFailure exceptions --- cassandra/__init__.py | 70 +++++++++++++++++++++++++++++++++++++++++++ cassandra/protocol.py | 7 +++++ 2 files changed, 77 insertions(+) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 96f085767c..909eeb13da 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -221,6 +221,76 @@ def __init__(self, message, 
write_type=None, **kwargs): self.write_type = write_type +class Failure(Exception): + """ + Replicas sent a failure to the coordinator. + """ + + consistency = None + """ The requested :class:`ConsistencyLevel` """ + + required_responses = None + """ The number of required replica responses """ + + received_responses = None + """ + The number of replicas that responded before the coordinator timed out + the operation + """ + + failures = None + """ + The number of replicas that sent a failure message + """ + + def __init__(self, summary_message, consistency=None, required_responses=None, received_responses=None, failures=None): + self.consistency = consistency + self.required_responses = required_responses + self.received_responses = received_responses + self.failures = failures + Exception.__init__(self, summary_message + ' info=' + + repr({'consistency': consistency_value_to_name(consistency), + 'required_responses': required_responses, + 'received_responses': received_responses, + 'failures' : failures})) + + +class ReadFailure(Failure): + """ + A subclass of :exc:`Failure` for read operations. + + This indicates that the replicas sent a failure message to the coordinator. + """ + + data_retrieved = None + """ + A boolean indicating whether the requested data was retrieved + by the coordinator from any replicas before it timed out the + operation + """ + + def __init__(self, message, data_retrieved=None, **kwargs): + Failure.__init__(self, message, **kwargs) + self.data_retrieved = data_retrieved + + +class WriteFailure(Failure): + """ + A subclass of :exc:`Failure` for write operations. + + This indicates that the replicas sent a failure message to the coordinator. 
+ """ + + write_type = None + """ + The type of write operation, enum on :class:`~cassandra.policies.WriteType` + """ + + def __init__(self, message, write_type=None, **kwargs): + Failure.__init__(self, message, **kwargs) + self.write_type = write_type + + class AlreadyExists(Exception): """ An attempt was made to create a keyspace or table that already exists. diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 8b22f75d97..7f5c2cea2c 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -22,6 +22,7 @@ import io from cassandra import (Unavailable, WriteTimeout, ReadTimeout, + WriteFailure, ReadFailure, AlreadyExists, InvalidRequest, Unauthorized, UnsupportedOperation) from cassandra.marshal import (int32_pack, int32_unpack, uint16_pack, uint16_unpack, @@ -277,6 +278,9 @@ def recv_error_info(f): 'data_retrieved': bool(read_byte(f)), } + def to_exception(self): + return ReadFailure(self.summary_msg(), **self.info) + class WriteFailureMessage(RequestExecutionException): summary = "Replica(s) failed to execute write" @@ -292,6 +296,9 @@ def recv_error_info(f): 'write_type': WriteType.name_to_value[read_string(f)], } + def to_exception(self): + return WriteFailure(self.summary_msg(), **self.info) + class SyntaxException(RequestValidationException): summary = 'Syntax error in CQL query' error_code = 0x2000 From 2662ec95971e64eb584bef06a0df0f09a95d6123 Mon Sep 17 00:00:00 2001 From: Tyler Hobbs Date: Tue, 10 Mar 2015 14:56:23 -0500 Subject: [PATCH 1251/3726] Add read support for CompositeType --- cassandra/cqltypes.py | 17 +++++++++++++++++ tests/integration/standard/test_types.py | 20 ++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 0c8648d4f7..9ed36fa95b 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -990,6 +990,23 @@ def cql_parameterized_type(cls): typestring = cls.cass_parameterized_type(full=True) return "'%s'" % (typestring,) + @classmethod + def 
deserialize_safe(cls, byts, protocol_version): + result = [] + for subtype in cls.subtypes: + if not byts: + # CompositeType can have missing elements at the end + break + + element_length = uint16_unpack(byts[:2]) + element = byts[2:2 + element_length] + + # skip element length, element, and the EOC (one byte) + byts = byts[2 + element_length + 1:] + result.append(subtype.from_binary(element, protocol_version)) + + return tuple(result) + class DynamicCompositeType(CompositeType): typename = "'org.apache.cassandra.db.marshal.DynamicCompositeType'" diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index d4ef94c2d4..d42ab4c0a1 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -741,3 +741,23 @@ def test_nested_collections(self): s.execute("DROP TYPE %s_nested" % (name)) s.execute("DROP TYPE %s" % (name)) s.shutdown() + + def test_reading_composite_type(self): + s = self._session + s.execute(""" + CREATE TABLE composites ( + a int PRIMARY KEY, + b 'org.apache.cassandra.db.marshal.CompositeType(AsciiType, Int32Type)' + )""") + + # CompositeType string literals are split on ':' chars + s.execute("INSERT INTO composites (a, b) VALUES (0, 'abc:123')") + result = s.execute("SELECT * FROM composites WHERE a = 0")[0] + self.assertEqual(0, result.a) + self.assertEqual(('abc', 123), result.b) + + # CompositeType values can omit elements at the end + s.execute("INSERT INTO composites (a, b) VALUES (0, 'abc')") + result = s.execute("SELECT * FROM composites WHERE a = 0")[0] + self.assertEqual(0, result.a) + self.assertEqual(('abc',), result.b) From 253279e0c6017a55b5c926645006288b4369817f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 12 Mar 2015 14:48:50 -0500 Subject: [PATCH 1252/3726] Review tweaks PYTHON-231 --- cassandra/cqltypes.py | 4 ++-- tests/unit/test_policies.py | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/cassandra/cqltypes.py 
b/cassandra/cqltypes.py index 8b5a90872d..562b42dcd1 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -780,8 +780,8 @@ class MapType(_ParameterizedType): @classmethod def validate(cls, val): - key_type, subvaltype = cls.subtypes - return dict((key_type.validate(k), subvaltype.validate(v)) for (k, v) in six.iteritems(val)) + key_type, value_type = cls.subtypes + return dict((key_type.validate(k), value_type.validate(v)) for (k, v) in six.iteritems(val)) @classmethod def deserialize_safe(cls, byts, protocol_version): diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index ba389c7662..b232dcfe1a 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -15,7 +15,7 @@ try: import unittest2 as unittest except ImportError: - import unittest # noqa + import unittest # noqa from itertools import islice, cycle from mock import Mock @@ -139,7 +139,7 @@ def host_down(): threads.append(Thread(target=host_down)) # make the GIL switch after every instruction, maximizing - # the chace of race conditions + # the chance of race conditions check = six.PY2 or '__pypy__' in sys.builtin_module_names if check: original_interval = sys.getcheckinterval() @@ -363,6 +363,7 @@ def test_default_dc(self): policy.on_add(host_remote) self.assertFalse(policy.local_dc) + class TokenAwarePolicyTest(unittest.TestCase): def test_wrap_round_robin(self): @@ -520,7 +521,6 @@ def test_status_updates(self): qplan = list(policy.make_query_plan()) self.assertEqual(qplan, []) - def test_statement_keyspace(self): hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)] for host in hosts: @@ -667,6 +667,7 @@ def test_schedule(self): ONE = ConsistencyLevel.ONE + class RetryPolicyTest(unittest.TestCase): def test_read_timeout(self): From fae9abde7a6da4620cc8fd336623253b3d65c8e9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 12 Mar 2015 15:10:22 -0500 Subject: [PATCH 1253/3726] Test DateType from timestamp rounding anomaly workaround 
PYTHON-230 --- tests/unit/test_types.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index b5b70b2f62..29a28cd427 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -294,6 +294,11 @@ def test_datetype(self): self.assertRaises(ValueError, date_type.interpret_datestring, 'fakestring') + # work around rounding difference among Python versions (PYTHON-230) + expected = 1424817268.274 + self.assertEqual(DateType.deserialize(int64_pack(int(1000 * expected)), 0), datetime.datetime(2015, 2, 24, 22, 34, 28, 274000)) + + def test_write_read_string(self): with tempfile.TemporaryFile() as f: value = u'test' From dd1b0ed60c29ca63dafc81ac7743dc887583c396 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 13 Mar 2015 13:36:50 -0500 Subject: [PATCH 1254/3726] Simplify uuid time scale float literal PYTHON-99 peer review --- cassandra/util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/util.py b/cassandra/util.py index c53bcb802b..baf7006f44 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -33,7 +33,7 @@ def unix_time_from_uuid1(uuid_arg): :rtype: timestamp """ - return (uuid_arg.time - 0x01B21DD213814000) / 10000000.0 + return (uuid_arg.time - 0x01B21DD213814000) / 1e7 def datetime_from_uuid1(uuid_arg): From dce4d17828b3a368172006a0946e2843993dace9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 13 Mar 2015 13:38:42 -0500 Subject: [PATCH 1255/3726] Use polymorphism for OrderedMap type when key type is known PYTHON-231 peer review --- cassandra/cqltypes.py | 5 ++--- cassandra/util.py | 36 +++++++++++++++------------------- tests/unit/test_marshalling.py | 13 ++++++------ 3 files changed, 25 insertions(+), 29 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 562b42dcd1..7a347e8afb 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -48,7 +48,7 @@ int32_pack, int32_unpack, int64_pack, int64_unpack, 
float_pack, float_unpack, double_pack, double_unpack, varint_pack, varint_unpack) -from cassandra.util import OrderedMap, sortedset, Time +from cassandra.util import OrderedMapSerializedKey, sortedset, Time apache_cassandra_type_prefix = 'org.apache.cassandra.db.marshal.' @@ -794,8 +794,7 @@ def deserialize_safe(cls, byts, protocol_version): length = 2 numelements = unpack(byts[:length]) p = length - themap = OrderedMap() - themap._set_key_type(key_type, protocol_version) + themap = OrderedMapSerializedKey(key_type, protocol_version) for _ in range(numelements): key_len = unpack(byts[p:p + length]) p += length diff --git a/cassandra/util.py b/cassandra/util.py index e1f158ad4c..8dc2c590db 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -594,13 +594,6 @@ class OrderedMap(Mapping): ''' - cass_key_type = None - protocol_version = None - ''' - Set when deserializing a map from the server, when the key type is known. - This avoids re-serializing, and also helps normalize lookups on text types. - ''' - def __init__(self, *args, **kwargs): if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) @@ -628,18 +621,6 @@ def _insert(self, key, value): self._items.append((key, value)) self._index[flat_key] = len(self._items) - 1 - def _set_key_type(self, cass_type, protocol_version): - self.cass_key_type = cass_type - self.protocol_version = protocol_version - - def _insert_unchecked(self, key, flat_key, value): - ''' - Used when building from server response. 
- cass_key_type must be set in order for lookups to work later - ''' - self._items.append((key, value)) - self._index[flat_key] = len(self._items) - 1 - def __getitem__(self, key): try: index = self._index[self._serialize_key(key)] @@ -675,7 +656,22 @@ def __str__(self): return '{%s}' % ', '.join("%s: %s" % (k, v) for k, v in self._items) def _serialize_key(self, key): - return self.cass_key_type.serialize(key, self.protocol_version) if self.cass_key_type else cPickle.dumps(key) + return cPickle.dumps(key) + + +class OrderedMapSerializedKey(OrderedMap): + + def __init__(self, cass_type, protocol_version): + super(OrderedMapSerializedKey, self).__init__() + self.cass_key_type = cass_type + self.protocol_version = protocol_version + + def _insert_unchecked(self, key, flat_key, value): + self._items.append((key, value)) + self._index[flat_key] = len(self._items) - 1 + + def _serialize_key(self, key): + return self.cass_key_type.serialize(key, self.protocol_version) import datetime diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index dcd4b772c2..5706ef3f6f 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -23,8 +23,8 @@ from decimal import Decimal from uuid import UUID -from cassandra.cqltypes import lookup_casstype -from cassandra.util import OrderedMap, sortedset, Time +from cassandra.cqltypes import lookup_casstype, DecimalType, UTF8Type +from cassandra.util import OrderedMap, OrderedMapSerializedKey, sortedset, Time marshalled_value_pairs = ( # binary form, type, python native type @@ -75,7 +75,7 @@ (b'', 'MapType(AsciiType, BooleanType)', None), (b'', 'ListType(FloatType)', None), (b'', 'SetType(LongType)', None), - (b'\x00\x00', 'MapType(DecimalType, BooleanType)', OrderedMap()), + (b'\x00\x00', 'MapType(DecimalType, BooleanType)', OrderedMapSerializedKey(DecimalType, 0)), (b'\x00\x00', 'ListType(FloatType)', []), (b'\x00\x00', 'SetType(IntegerType)', sortedset()), 
(b'\x00\x01\x00\x10\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0', 'ListType(TimeUUIDType)', [UUID(bytes=b'\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0')]), @@ -84,9 +84,10 @@ (b'\x00\x00\x00\x00\x00\x00\x00\x01', 'TimeType', Time(1)) ) -ordered_map_value = OrderedMap([(u'\u307fbob', 199), - (u'', -1), - (u'\\', 0)]) +ordered_map_value = OrderedMapSerializedKey(UTF8Type, 2) +ordered_map_value._insert(u'\u307fbob', 199) +ordered_map_value._insert(u'', -1) +ordered_map_value._insert(u'\\', 0) # these following entries work for me right now, but they're dependent on # vagaries of internal python ordering for unordered types From 4f3c77c970e10774243e7ad7baed2d41c06c4790 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 13 Mar 2015 15:02:31 -0500 Subject: [PATCH 1256/3726] Introduce Date type to accomodate ranges outside datetime.[MIN|MAX}YEAR --- cassandra/cqltypes.py | 28 +++++--------- cassandra/util.py | 71 ++++++++++++++++++++++++++++++++++ tests/unit/test_marshalling.py | 6 +-- tests/unit/test_types.py | 4 +- 4 files changed, 85 insertions(+), 24 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 852397340a..fb96c7c9af 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -31,7 +31,6 @@ from binascii import unhexlify import calendar from collections import namedtuple -import datetime from decimal import Decimal import io import logging @@ -626,38 +625,29 @@ def serialize(timeuuid, protocol_version): class SimpleDateType(_CassandraType): typename = 'date' - seconds_per_day = 60 * 60 * 24 date_format = "%Y-%m-%d" @classmethod def validate(cls, val): - if isinstance(val, six.string_types): - val = cls.interpret_simpledate_string(val) - elif (not isinstance(val, datetime.date)) and not isinstance(val, six.integer_types): - raise TypeError('SimpleDateType arg must be a datetime.date, unsigned integer, or string in the format YYYY-MM-DD') + if not isinstance(val, util.Date): + val = util.Date(val) return val - @staticmethod - 
def interpret_simpledate_string(v): - date_time = datetime.datetime.strptime(v, SimpleDateType.date_format) - return datetime.date(date_time.year, date_time.month, date_time.day) - @staticmethod def serialize(val, protocol_version): # Values of the 'date'` type are encoded as 32-bit unsigned integers - # representing a number of days with "the epoch" at the center of the - # range (2^31). Epoch is January 1st, 1970 + # representing a number of days with epoch (January 1st, 1970) at the center of the + # range (2^31). try: - shifted = (calendar.timegm(val.timetuple()) // SimpleDateType.seconds_per_day) + 2 ** 31 + days = val.days_from_epoch except AttributeError: - shifted = val - return uint32_pack(shifted) + days = util.Date(val).days_from_epoch + return uint32_pack(days + 2 ** 31) @staticmethod def deserialize(byts, protocol_version): - timestamp = SimpleDateType.seconds_per_day * (uint32_unpack(byts) - 2 ** 31) - dt = util.datetime_from_timestamp(timestamp) - return datetime.date(dt.year, dt.month, dt.day) + days = uint32_unpack(byts) - 2 ** 31 + return util.Date(days) class TimeType(_CassandraType): diff --git a/cassandra/util.py b/cassandra/util.py index ab70840479..414cc9ecd3 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -892,3 +892,74 @@ def __repr__(self): def __str__(self): return "%02d:%02d:%02d.%09d" % (self.hour, self.minute, self.second, self.nanosecond) + + +class Date(object): + ''' + Idealized naive date: year, month, day + + Offers wider year range than datetime.date. Dates that cannot be represented + as a date (because datetime.MINYEAR, datetime.MAXYEAR), this type falls back + to printing days_from_epoch offset. 
+ ''' + + MINUTE = 60 + HOUR = 60 * MINUTE + DAY = 24 * HOUR + + date_format = "%Y-%m-%d" + + days_from_epoch = 0 + + def __init__(self, value): + if isinstance(value, six.integer_types): + self.days_from_epoch = value + elif isinstance(value, (datetime.date, datetime.datetime)): + self._from_timetuple(value.timetuple()) + elif isinstance(value, six.string_types): + self._from_datestring(value) + else: + raise TypeError('Date arguments must be a whole number, datetime.date, or string') + + @property + def seconds(self): + return self.days_from_epoch * Date.DAY + + def date(self): + try: + dt = datetime_from_timestamp(self.seconds) + return datetime.date(dt.year, dt.month, dt.day) + except Exception: + raise ValueError("%r exceeds ranges for built-in datetime.date" % self) + + def _from_timetuple(self, t): + self.days_from_epoch = calendar.timegm(t) // Date.DAY + + def _from_datestring(self, s): + if s[0] == '+': + s = s[1:] + dt = datetime.datetime.strptime(s, self.date_format) + self._from_timetuple(dt.timetuple()) + + def __eq__(self, other): + if isinstance(other, Date): + return self.days_from_epoch == other.days_from_epoch + + if isinstance(other, six.integer_types): + return self.days_from_epoch == other + + try: + return self.date == other + except Exception: + return False + + def __repr__(self): + return "Date(%s)" % self.days_from_epoch + + def __str__(self): + try: + dt = datetime_from_timestamp(self.seconds) + return "%04d-%02d-%02d" % (dt.year, dt.month, dt.day) + except: + # If we overflow datetime.[MIN|M + return str(self.days_from_epoch) diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index 5706ef3f6f..da645c4d82 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -24,7 +24,7 @@ from uuid import UUID from cassandra.cqltypes import lookup_casstype, DecimalType, UTF8Type -from cassandra.util import OrderedMap, OrderedMapSerializedKey, sortedset, Time +from cassandra.util import OrderedMap, 
OrderedMapSerializedKey, sortedset, Time, Date marshalled_value_pairs = ( # binary form, type, python native type @@ -79,8 +79,8 @@ (b'\x00\x00', 'ListType(FloatType)', []), (b'\x00\x00', 'SetType(IntegerType)', sortedset()), (b'\x00\x01\x00\x10\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0', 'ListType(TimeUUIDType)', [UUID(bytes=b'\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0')]), - (b'\x80\x00\x00\x01', 'SimpleDateType', date(1970,1,2)), - (b'\x7f\xff\xff\xff', 'SimpleDateType', date(1969,12,31)), + (b'\x80\x00\x00\x01', 'SimpleDateType', Date(1)), + (b'\x7f\xff\xff\xff', 'SimpleDateType', Date('1969-12-31')), (b'\x00\x00\x00\x00\x00\x00\x00\x01', 'TimeType', Time(1)) ) diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 7e1b041536..c35e99c915 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -130,7 +130,7 @@ def test_simpledate(self): Test cassandra.cqltypes.SimpleDateType() construction """ # from string - expected_date = datetime.date(1492, 10, 12) + expected_date = cassandra.util.Date(datetime.date(1492, 10, 12)) sd = SimpleDateType('1492-10-12') self.assertEqual(sd.val, expected_date) @@ -139,7 +139,7 @@ def test_simpledate(self): self.assertEqual(sd.val, expected_date) # int - expected_timestamp = calendar.timegm(expected_date.timetuple()) + expected_timestamp = calendar.timegm(expected_date.date().timetuple()) sd = SimpleDateType(expected_timestamp) self.assertEqual(sd.val, expected_timestamp) From d2fe776106c46df7eeb38a6b0b898ec9bb9bf693 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 16 Mar 2015 10:46:13 -0500 Subject: [PATCH 1257/3726] Expanded unit tests around cassandra.util.Date --- cassandra/util.py | 2 +- tests/unit/test_types.py | 69 +++++++++++++++++++++++++++++----------- 2 files changed, 52 insertions(+), 19 deletions(-) diff --git a/cassandra/util.py b/cassandra/util.py index 414cc9ecd3..f9c376a6db 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -949,7 +949,7 @@ def __eq__(self, other): 
return self.days_from_epoch == other try: - return self.date == other + return self.date() == other except Exception: return False diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index c35e99c915..76dfc2721e 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -17,7 +17,6 @@ import unittest # noqa from binascii import unhexlify -import calendar import datetime import tempfile import six @@ -115,9 +114,8 @@ def test_casstype_parameterized(self): self.assertEqual(LongType.cql_parameterized_type(), 'bigint') subtypes = (cassandra.cqltypes.UTF8Type, cassandra.cqltypes.UTF8Type) - self.assertEqual( - 'map', - cassandra.cqltypes.MapType.apply_parameters(subtypes).cql_parameterized_type()) + self.assertEqual('map', + cassandra.cqltypes.MapType.apply_parameters(subtypes).cql_parameterized_type()) def test_datetype_from_string(self): # Ensure all formats can be parsed, without exception @@ -129,24 +127,61 @@ def test_simpledate(self): """ Test cassandra.cqltypes.SimpleDateType() construction """ + Date = cassandra.util.Date + # from datetime + expected_dt = datetime.datetime(1492, 10, 12, 1, 1) + expected_date = Date(expected_dt) + self.assertEqual(str(expected_date), '1492-10-12') + # from string - expected_date = cassandra.util.Date(datetime.date(1492, 10, 12)) sd = SimpleDateType('1492-10-12') self.assertEqual(sd.val, expected_date) + sd = SimpleDateType('+1492-10-12') + self.assertEqual(sd.val, expected_date) - # date + # Date sd = SimpleDateType(expected_date) self.assertEqual(sd.val, expected_date) - # int - expected_timestamp = calendar.timegm(expected_date.date().timetuple()) - sd = SimpleDateType(expected_timestamp) - self.assertEqual(sd.val, expected_timestamp) + # date + sd = SimpleDateType(datetime.date(expected_dt.year, expected_dt.month, expected_dt.day)) + self.assertEqual(sd.val, expected_date) + + # days + sd = SimpleDateType(0) + self.assertEqual(sd.val, Date(datetime.date(1970, 1, 1))) + sd = SimpleDateType(-1) + 
self.assertEqual(sd.val, Date(datetime.date(1969, 12, 31))) + sd = SimpleDateType(1) + self.assertEqual(sd.val, Date(datetime.date(1970, 1, 2))) + # limits + min_builtin = Date(datetime.date(1, 1, 1)) + max_builtin = Date(datetime.date(9999, 12, 31)) + self.assertEqual(SimpleDateType(min_builtin.days_from_epoch).val, min_builtin) + self.assertEqual(SimpleDateType(max_builtin.days_from_epoch).val, max_builtin) + # just proving we can construct with on offset outside buildin range + self.assertEqual(SimpleDateType(min_builtin.days_from_epoch - 1).val.days_from_epoch, + min_builtin.days_from_epoch - 1) + self.assertEqual(SimpleDateType(max_builtin.days_from_epoch + 1).val.days_from_epoch, + max_builtin.days_from_epoch + 1) # no contruct - self.assertRaises(ValueError, SimpleDateType, '1999-10-10-bad-time') + self.assertRaises(ValueError, SimpleDateType, '-1999-10-10') self.assertRaises(TypeError, SimpleDateType, 1.234) + # str + date_str = '2015-03-16' + self.assertEqual(str(Date(date_str)), date_str) + # out of range + self.assertEqual(str(Date(2932897)), '2932897') + self.assertEqual(repr(Date(1)), 'Date(1)') + + # eq other types + self.assertEqual(Date(1234), 1234) + self.assertEqual(Date(1), datetime.date(1970, 1, 2)) + self.assertFalse(Date(2932897) == datetime.date(9999, 12, 31)) # date can't represent year > 9999 + self.assertEqual(Date(2932897), 2932897) + def test_time(self): """ Test cassandra.cqltypes.TimeType() construction @@ -180,16 +215,16 @@ def test_time(self): tt = TimeType('23:59:59.12345') tt = TimeType('23:59:59.123456') - self.assertEqual(tt.val, 23*one_hour + 59*one_minute + 59*one_second + 123*one_milli + 456*one_micro) + self.assertEqual(tt.val, 23 * one_hour + 59 * one_minute + 59 * one_second + 123 * one_milli + 456 * one_micro) tt = TimeType('23:59:59.1234567') - self.assertEqual(tt.val, 23*one_hour + 59*one_minute + 59*one_second + 123*one_milli + 456*one_micro + 700) + self.assertEqual(tt.val, 23 * one_hour + 59 * one_minute + 59 * 
one_second + 123 * one_milli + 456 * one_micro + 700) tt = TimeType('23:59:59.12345678') - self.assertEqual(tt.val, 23*one_hour + 59*one_minute + 59*one_second + 123*one_milli + 456*one_micro + 780) + self.assertEqual(tt.val, 23 * one_hour + 59 * one_minute + 59 * one_second + 123 * one_milli + 456 * one_micro + 780) tt = TimeType('23:59:59.123456789') - self.assertEqual(tt.val, 23*one_hour + 59*one_minute + 59*one_second + 123*one_milli + 456*one_micro + 789) + self.assertEqual(tt.val, 23 * one_hour + 59 * one_minute + 59 * one_second + 123 * one_milli + 456 * one_micro + 789) # from int tt = TimeType(12345678) @@ -200,7 +235,6 @@ def test_time(self): self.assertRaises(TypeError, TimeType, 1.234) self.assertRaises(TypeError, TimeType, datetime.datetime(2004, 12, 23, 11, 11, 1)) - def test_cql_typename(self): """ Smoke test cql_typename @@ -286,7 +320,7 @@ def test_datetype(self): self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime.utcfromtimestamp(expected)) # beyond 32b - expected = 2**33 + expected = 2 ** 33 self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime.utcfromtimestamp(expected)) # less than epoc (PYTHON-119) @@ -299,7 +333,6 @@ def test_datetype(self): expected = 1424817268.274 self.assertEqual(DateType.deserialize(int64_pack(int(1000 * expected)), 0), datetime.datetime(2015, 2, 24, 22, 34, 28, 274000)) - def test_write_read_string(self): with tempfile.TemporaryFile() as f: value = u'test' From 41175831d1671704e332d109999f39ce988318b0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 16 Mar 2015 10:51:55 -0500 Subject: [PATCH 1258/3726] grammar correction --- cassandra/util.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/util.py b/cassandra/util.py index f9c376a6db..3280392064 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -898,8 +898,8 @@ class Date(object): ''' Idealized naive date: year, month, day - Offers wider year range 
than datetime.date. Dates that cannot be represented - as a date (because datetime.MINYEAR, datetime.MAXYEAR), this type falls back + Offers wider year range than datetime.date. For Dates that cannot be represented + as a datetime.date (because datetime.MINYEAR, datetime.MAXYEAR), this type falls back to printing days_from_epoch offset. ''' From 760b1958199e50b8ec2971e9c5bc207031e40afb Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 16 Mar 2015 11:55:52 -0500 Subject: [PATCH 1259/3726] Use repr for inner types in ...util.OrderedMap.__str__ --- cassandra/util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/util.py b/cassandra/util.py index 3280392064..d69120d9a8 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -776,7 +776,7 @@ def __repr__(self): ', '.join("(%r, %r)" % (k, v) for k, v in self._items)) def __str__(self): - return '{%s}' % ', '.join("%s: %s" % (k, v) for k, v in self._items) + return '{%s}' % ', '.join("%r: %r" % (k, v) for k, v in self._items) def _serialize_key(self, key): return cPickle.dumps(key) From 46b2474728f219b61f155828880f7a7c8bda72f5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 16 Mar 2015 13:17:50 -0500 Subject: [PATCH 1260/3726] Test OrderedMapSerializedKey PYTHON-231 --- tests/unit/test_orderedmap.py | 116 ++++++++++++++++++++++------------ 1 file changed, 76 insertions(+), 40 deletions(-) diff --git a/tests/unit/test_orderedmap.py b/tests/unit/test_orderedmap.py index 48666fa9eb..25786a7853 100644 --- a/tests/unit/test_orderedmap.py +++ b/tests/unit/test_orderedmap.py @@ -17,8 +17,8 @@ except ImportError: import unittest # noqa -from cassandra.util import OrderedMap -from cassandra.cqltypes import EMPTY +from cassandra.util import OrderedMap, OrderedMapSerializedKey +from cassandra.cqltypes import EMPTY, UTF8Type, lookup_casstype import six class OrderedMapTest(unittest.TestCase): @@ -37,90 +37,95 @@ def test_init(self): self.assertEqual(d['key1'], 'v1') 
self.assertEqual(d['key2'], 'v2') + with self.assertRaises(TypeError): + OrderedMap('too', 'many', 'args') + def test_contains(self): keys = ['first', 'middle', 'last'] - od = OrderedMap() + om = OrderedMap() - od = OrderedMap(zip(keys, range(len(keys)))) + om = OrderedMap(zip(keys, range(len(keys)))) for k in keys: - self.assertTrue(k in od) - self.assertFalse(k not in od) + self.assertTrue(k in om) + self.assertFalse(k not in om) - self.assertTrue('notthere' not in od) - self.assertFalse('notthere' in od) + self.assertTrue('notthere' not in om) + self.assertFalse('notthere' in om) def test_keys(self): keys = ['first', 'middle', 'last'] - od = OrderedMap(zip(keys, range(len(keys)))) + om = OrderedMap(zip(keys, range(len(keys)))) - self.assertListEqual(list(od.keys()), keys) + self.assertListEqual(list(om.keys()), keys) def test_values(self): keys = ['first', 'middle', 'last'] values = list(range(len(keys))) - od = OrderedMap(zip(keys, values)) + om = OrderedMap(zip(keys, values)) - self.assertListEqual(list(od.values()), values) + self.assertListEqual(list(om.values()), values) def test_items(self): keys = ['first', 'middle', 'last'] items = list(zip(keys, range(len(keys)))) - od = OrderedMap(items) + om = OrderedMap(items) - self.assertListEqual(list(od.items()), items) + self.assertListEqual(list(om.items()), items) def test_get(self): keys = ['first', 'middle', 'last'] - od = OrderedMap(zip(keys, range(len(keys)))) + om = OrderedMap(zip(keys, range(len(keys)))) for v, k in enumerate(keys): - self.assertEqual(od.get(k), v) + self.assertEqual(om.get(k), v) - self.assertEqual(od.get('notthere', 'default'), 'default') - self.assertIsNone(od.get('notthere')) + self.assertEqual(om.get('notthere', 'default'), 'default') + self.assertIsNone(om.get('notthere')) def test_equal(self): d1 = {'one': 1} d12 = {'one': 1, 'two': 2} - od1 = OrderedMap({'one': 1}) - od12 = OrderedMap([('one', 1), ('two', 2)]) - od21 = OrderedMap([('two', 2), ('one', 1)]) - - 
self.assertEqual(od1, d1) - self.assertEqual(od12, d12) - self.assertEqual(od21, d12) - self.assertNotEqual(od1, od12) - self.assertNotEqual(od12, od1) - self.assertNotEqual(od12, od21) - self.assertNotEqual(od1, d12) - self.assertNotEqual(od12, d1) - self.assertNotEqual(od1, EMPTY) + om1 = OrderedMap({'one': 1}) + om12 = OrderedMap([('one', 1), ('two', 2)]) + om21 = OrderedMap([('two', 2), ('one', 1)]) + + self.assertEqual(om1, d1) + self.assertEqual(om12, d12) + self.assertEqual(om21, d12) + self.assertNotEqual(om1, om12) + self.assertNotEqual(om12, om1) + self.assertNotEqual(om12, om21) + self.assertNotEqual(om1, d12) + self.assertNotEqual(om12, d1) + self.assertNotEqual(om1, EMPTY) + + self.assertFalse(OrderedMap([('three', 3), ('four', 4)]) == d12) def test_getitem(self): keys = ['first', 'middle', 'last'] - od = OrderedMap(zip(keys, range(len(keys)))) + om = OrderedMap(zip(keys, range(len(keys)))) for v, k in enumerate(keys): - self.assertEqual(od[k], v) + self.assertEqual(om[k], v) with self.assertRaises(KeyError): - od['notthere'] + om['notthere'] def test_iter(self): keys = ['first', 'middle', 'last'] values = list(range(len(keys))) items = list(zip(keys, values)) - od = OrderedMap(items) + om = OrderedMap(items) - itr = iter(od) + itr = iter(om) self.assertEqual(sum([1 for _ in itr]), len(keys)) self.assertRaises(StopIteration, six.next, itr) - self.assertEqual(list(iter(od)), keys) - self.assertEqual(list(six.iteritems(od)), items) - self.assertEqual(list(six.itervalues(od)), values) + self.assertEqual(list(iter(om)), keys) + self.assertEqual(list(six.iteritems(om)), items) + self.assertEqual(list(six.itervalues(om)), values) def test_len(self): self.assertEqual(len(OrderedMap()), 0) @@ -129,4 +134,35 @@ def test_len(self): def test_mutable_keys(self): d = {'1': 1} s = set([1, 2, 3]) - od = OrderedMap([(d, 'dict'), (s, 'set')]) + om = OrderedMap([(d, 'dict'), (s, 'set')]) + + def test_strings(self): + # changes in 3.x + d = {'map': 'inner'} + s = set([1, 
2, 3]) + self.assertEqual(repr(OrderedMap([('two', 2), ('one', 1), (d, 'value'), (s, 'another')])), + "OrderedMap([('two', 2), ('one', 1), (%r, 'value'), (%r, 'another')])" % (d, s)) + + self.assertEqual(str(OrderedMap([('two', 2), ('one', 1), (d, 'value'), (s, 'another')])), + "{'two': 2, 'one': 1, %r: 'value', %r: 'another'}" % (d, s)) + + +class OrderedMapSerializedKeyTest(unittest.TestCase): + def test_init(self): + om = OrderedMapSerializedKey(UTF8Type, 2) + self.assertEqual(om, {}) + + def test_normalized_lookup(self): + key_type = lookup_casstype('MapType(UTF8Type, Int32Type)') + protocol_version = 3 + om = OrderedMapSerializedKey(key_type, protocol_version) + key_ascii = {'one': 1} + key_unicode = {u'two': 2} + om._insert_unchecked(key_ascii, key_type.serialize(key_ascii, protocol_version), object()) + om._insert_unchecked(key_unicode, key_type.serialize(key_unicode, protocol_version), object()) + + # type lookup is normalized by key_type + # PYTHON-231 + self.assertIs(om[{'one': 1}], om[{u'one': 1}]) + self.assertIs(om[{'two': 2}], om[{u'two': 2}]) + self.assertIsNot(om[{'one': 1}], om[{'two': 2}]) From 8409730775501fb733e435adc20d9a2607fadc16 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 16 Mar 2015 14:38:52 -0500 Subject: [PATCH 1261/3726] Change arg order in uuid_from_time to match uuid.uuid1() --- cassandra/util.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/cassandra/util.py b/cassandra/util.py index d69120d9a8..469e81f1f8 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -55,7 +55,7 @@ def min_uuid_from_time(timestamp): See :func:`uuid_from_time` for argument and return types. 
""" - return uuid_from_time(timestamp, 0x80, 0x808080808080) # Cassandra does byte-wise comparison; fill with min signed bytes (0x80 = -128) + return uuid_from_time(timestamp, 0x808080808080, 0x80) # Cassandra does byte-wise comparison; fill with min signed bytes (0x80 = -128) def max_uuid_from_time(timestamp): @@ -64,10 +64,10 @@ def max_uuid_from_time(timestamp): See :func:`uuid_from_time` for argument and return types. """ - return uuid_from_time(timestamp, 0x3f7f, 0x7f7f7f7f7f7f) # Max signed bytes (0x7f = 127) + return uuid_from_time(timestamp, 0x7f7f7f7f7f7f, 0x3f7f) # Max signed bytes (0x7f = 127) -def uuid_from_time(time_arg, clock_seq=None, node=None): +def uuid_from_time(time_arg, node=None, clock_seq=None): """ Converts a datetime or timestamp to a type 1 :class:`uuid.UUID`. @@ -77,16 +77,16 @@ def uuid_from_time(time_arg, clock_seq=None, node=None): in seconds (as returned from :meth:`time.time()`). :type datetime: :class:`datetime` or timestamp - :param clock_seq: - Clock sequence field for the UUID (up to 14 bits). If not specified, - a random sequence is generated. - :type clock_seq: int - :param node: None integer for the UUID (up to 48 bits). If not specified, this field is randomized. :type node: long + :param clock_seq: + Clock sequence field for the UUID (up to 14 bits). If not specified, + a random sequence is generated. 
+ :type clock_seq: int + :rtype: :class:`uuid.UUID` """ @@ -112,7 +112,6 @@ def uuid_from_time(time_arg, clock_seq=None, node=None): if node is None: node = random.getrandbits(48) - node &= 0xffffffffff return uuid.UUID(fields=(time_low, time_mid, time_hi_version, clock_seq_hi_variant, clock_seq_low, node), version=1) From 66791fd9f6e222773a51d2430d086ea5cead945c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 16 Mar 2015 15:14:19 -0500 Subject: [PATCH 1262/3726] Bounds checking for uuid clock_seq --- cassandra/util.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cassandra/util.py b/cassandra/util.py index 469e81f1f8..6de2d084cb 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -106,6 +106,10 @@ def uuid_from_time(time_arg, node=None, clock_seq=None): if clock_seq is None: clock_seq = random.getrandbits(14) + else: + if clock_seq > 0x3fff: + raise ValueError('clock_seq is out of range (need a 14-bit value)') + clock_seq_low = clock_seq & 0xff clock_seq_hi_variant = 0x80 | ((clock_seq >> 8) & 0x3f) From ba5db699ca39b1b7a9a05646fc58d7c30a2ad3f4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 17 Mar 2015 08:19:56 -0500 Subject: [PATCH 1263/3726] time utility function tests PYTHON-99 --- tests/unit/test_time_util.py | 119 +++++++++++++++++++++++++++++++++++ 1 file changed, 119 insertions(+) create mode 100644 tests/unit/test_time_util.py diff --git a/tests/unit/test_time_util.py b/tests/unit/test_time_util.py new file mode 100644 index 0000000000..80f2d72f17 --- /dev/null +++ b/tests/unit/test_time_util.py @@ -0,0 +1,119 @@ +# Copyright 2013-2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from cassandra import marshal +from cassandra import util +import calendar +import datetime +import time +import uuid + +class TimeUtilTest(unittest.TestCase): + def test_datetime_from_timestamp(self): + self.assertEqual(util.datetime_from_timestamp(0), datetime.datetime(1970, 1, 1)) + # large negative; test PYTHON-110 workaround for windows + self.assertEqual(util.datetime_from_timestamp(-62135596800), datetime.datetime(1, 1, 1)) + self.assertEqual(util.datetime_from_timestamp(-62135596199), datetime.datetime(1, 1, 1, 0, 10, 1)) + + self.assertEqual(util.datetime_from_timestamp(253402300799), datetime.datetime(9999, 12, 31, 23, 59, 59)) + + self.assertEqual(util.datetime_from_timestamp(0.123456), datetime.datetime(1970, 1, 1, 0, 0, 0, 123456)) + + def test_times_from_uuid1(self): + node = uuid.getnode() + now = time.time() + u = uuid.uuid1(node, 0) + + t = util.unix_time_from_uuid1(u) + self.assertAlmostEqual(now, t, 3) + + dt = util.datetime_from_uuid1(u) + t = calendar.timegm(dt.timetuple()) + dt.microsecond / 1e6 + self.assertAlmostEqual(now, t, 3) + + def test_uuid_from_time(self): + t = time.time() + seq = 0x2aa5 + node = uuid.getnode() + u = util.uuid_from_time(t, node, seq) + self.assertEqual(util.unix_time_from_uuid1(u), t) + self.assertEqual(u.node, node) + self.assertEqual(u.clock_seq, seq) + + # random node + u1 = util.uuid_from_time(t, clock_seq=seq) + u2 = util.uuid_from_time(t, clock_seq=seq) + self.assertEqual(util.unix_time_from_uuid1(u1), t) + 
self.assertEqual(util.unix_time_from_uuid1(u2), t) + self.assertEqual(u.clock_seq, seq) + # not impossible, but we shouldn't get the same value twice + self.assertNotEqual(u1.node, u2.node) + + # random seq + u1 = util.uuid_from_time(t, node=node) + u2 = util.uuid_from_time(t, node=node) + self.assertEqual(util.unix_time_from_uuid1(u1), t) + self.assertEqual(util.unix_time_from_uuid1(u2), t) + self.assertEqual(u.node, node) + # not impossible, but we shouldn't get the same value twice + self.assertNotEqual(u1.clock_seq, u2.clock_seq) + + # node too large + with self.assertRaises(ValueError): + u = util.uuid_from_time(t, node=2 ** 48) + + # clock_seq too large + with self.assertRaises(ValueError): + u = util.uuid_from_time(t, clock_seq=0x4000) + + # construct from datetime + dt = util.datetime_from_timestamp(t) + u = util.uuid_from_time(dt, node, seq) + self.assertEqual(util.unix_time_from_uuid1(u), t) + self.assertEqual(u.node, node) + self.assertEqual(u.clock_seq, seq) + +# 0 1 2 3 +# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# | time_low | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# | time_mid | time_hi_and_version | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# |clk_seq_hi_res | clk_seq_low | node (0-1) | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# | node (2-5) | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + def test_min_uuid(self): + u = util.min_uuid_from_time(0) + # cassandra does a signed comparison of the remaining bytes + for byte in u.bytes[8:]: + self.assertEqual(marshal.int8_unpack(byte), -128) + + def test_max_uuid(self): + u = util.max_uuid_from_time(0) + # cassandra does a signed comparison of the remaining bytes + # the first non-time byte has the variant in it + # This byte is always negative, but should be the smallest negative + # number with 
high-order bits '10' + print u + print ord(u.bytes[8]) + self.assertEqual(marshal.int8_unpack(u.bytes[8]), -65) + for byte in u.bytes[9:]: + self.assertEqual(marshal.int8_unpack(byte), 127) From 7ad219962d6978f32b93c0d6282887ad8b1459cd Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 17 Mar 2015 08:31:29 -0500 Subject: [PATCH 1264/3726] Better coverage for util.Time --- tests/unit/test_types.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 76dfc2721e..14b3d5f351 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -186,6 +186,7 @@ def test_time(self): """ Test cassandra.cqltypes.TimeType() construction """ + Time = cassandra.util.Time one_micro = 1000 one_milli = 1000 * one_micro one_second = 1000 * one_milli @@ -230,9 +231,23 @@ def test_time(self): tt = TimeType(12345678) self.assertEqual(tt.val, 12345678) + # from time + expected_time = datetime.time(12, 1, 2, 3) + tt = TimeType(expected_time) + self.assertEqual(tt.val, expected_time) + + # util.Time self equality + self.assertEqual(Time(1234), Time(1234)) + + # str + time_str = '12:13:14.123456789' + self.assertEqual(str(Time(time_str)), time_str) + self.assertEqual(repr(Time(1)), 'Time(1)') + # no construct self.assertRaises(ValueError, TimeType, '1999-10-10 11:11:11.1234') self.assertRaises(TypeError, TimeType, 1.234) + self.assertRaises(ValueError, TimeType, 123456789000000) self.assertRaises(TypeError, TimeType, datetime.datetime(2004, 12, 23, 11, 11, 1)) def test_cql_typename(self): From af15b8aa0f42b22093df08d1e395da9456e99a91 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 17 Mar 2015 09:16:59 -0500 Subject: [PATCH 1265/3726] Make time uuid tests work in Python 3 --- tests/unit/test_time_util.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/tests/unit/test_time_util.py b/tests/unit/test_time_util.py index 80f2d72f17..f32b6a6046 100644 --- 
a/tests/unit/test_time_util.py +++ b/tests/unit/test_time_util.py @@ -24,6 +24,7 @@ import time import uuid + class TimeUtilTest(unittest.TestCase): def test_datetime_from_timestamp(self): self.assertEqual(util.datetime_from_timestamp(0), datetime.datetime(1970, 1, 1)) @@ -103,8 +104,8 @@ def test_uuid_from_time(self): def test_min_uuid(self): u = util.min_uuid_from_time(0) # cassandra does a signed comparison of the remaining bytes - for byte in u.bytes[8:]: - self.assertEqual(marshal.int8_unpack(byte), -128) + for i in range(8, 16): + self.assertEqual(marshal.int8_unpack(u.bytes[i:i + 1]), -128) def test_max_uuid(self): u = util.max_uuid_from_time(0) @@ -112,8 +113,6 @@ def test_max_uuid(self): # the first non-time byte has the variant in it # This byte is always negative, but should be the smallest negative # number with high-order bits '10' - print u - print ord(u.bytes[8]) - self.assertEqual(marshal.int8_unpack(u.bytes[8]), -65) - for byte in u.bytes[9:]: - self.assertEqual(marshal.int8_unpack(byte), 127) + self.assertEqual(marshal.int8_unpack(u.bytes[8:9]), -65) + for i in range(9, 16): + self.assertEqual(marshal.int8_unpack(u.bytes[i:i + 1]), 127) From d4b462e01bdff8b8cbfc6f07bed3e23b36798a7f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 17 Mar 2015 09:41:50 -0500 Subject: [PATCH 1266/3726] Make time comparisons AlmostEqual to account for differing precision --- tests/unit/test_time_util.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/tests/unit/test_time_util.py b/tests/unit/test_time_util.py index f32b6a6046..0a3209a5f9 100644 --- a/tests/unit/test_time_util.py +++ b/tests/unit/test_time_util.py @@ -53,15 +53,17 @@ def test_uuid_from_time(self): seq = 0x2aa5 node = uuid.getnode() u = util.uuid_from_time(t, node, seq) - self.assertEqual(util.unix_time_from_uuid1(u), t) + # using AlmostEqual because time precision is different for + # some platforms + self.assertAlmostEqual(util.unix_time_from_uuid1(u), t, 4) 
self.assertEqual(u.node, node) self.assertEqual(u.clock_seq, seq) # random node u1 = util.uuid_from_time(t, clock_seq=seq) u2 = util.uuid_from_time(t, clock_seq=seq) - self.assertEqual(util.unix_time_from_uuid1(u1), t) - self.assertEqual(util.unix_time_from_uuid1(u2), t) + self.assertAlmostEqual(util.unix_time_from_uuid1(u1), t, 4) + self.assertAlmostEqual(util.unix_time_from_uuid1(u2), t, 4) self.assertEqual(u.clock_seq, seq) # not impossible, but we shouldn't get the same value twice self.assertNotEqual(u1.node, u2.node) @@ -69,8 +71,8 @@ def test_uuid_from_time(self): # random seq u1 = util.uuid_from_time(t, node=node) u2 = util.uuid_from_time(t, node=node) - self.assertEqual(util.unix_time_from_uuid1(u1), t) - self.assertEqual(util.unix_time_from_uuid1(u2), t) + self.assertAlmostEqual(util.unix_time_from_uuid1(u1), t, 4) + self.assertAlmostEqual(util.unix_time_from_uuid1(u2), t, 4) self.assertEqual(u.node, node) # not impossible, but we shouldn't get the same value twice self.assertNotEqual(u1.clock_seq, u2.clock_seq) @@ -86,7 +88,7 @@ def test_uuid_from_time(self): # construct from datetime dt = util.datetime_from_timestamp(t) u = util.uuid_from_time(dt, node, seq) - self.assertEqual(util.unix_time_from_uuid1(u), t) + self.assertAlmostEqual(util.unix_time_from_uuid1(u), t, 4) self.assertEqual(u.node, node) self.assertEqual(u.clock_seq, seq) From bde53b908c1e0c4b7a11716f2e6fa97516478c8f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 17 Mar 2015 10:57:28 -0500 Subject: [PATCH 1267/3726] remove debug print statement --- cassandra/cqlengine/usertype.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index 916f738814..fa94293d5c 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -39,7 +39,6 @@ def __init__(self, **values): self._values[name] = value_mngr def __eq__(self, other): - print self.__class__, other.__class__ if self.__class__ != other.__class__: 
return False From 3fa3f8daaf86d4f63098dd27e58cb204c3f292a8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 17 Mar 2015 11:16:19 -0500 Subject: [PATCH 1268/3726] Update imports for moved exceptions, in cqlengine tests --- .../management/test_compaction_settings.py | 11 +----- .../cqlengine/management/test_management.py | 37 ++++++++++--------- .../cqlengine/model/test_model_io.py | 19 ++++------ .../cqlengine/model/test_updates.py | 3 +- .../cqlengine/query/test_updates.py | 2 +- 5 files changed, 31 insertions(+), 41 deletions(-) diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index adb4afa784..0efd91570d 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -17,7 +17,7 @@ from mock import patch from cassandra.cqlengine import columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy -from cassandra.cqlengine.exceptions import CQLEngineException +from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine.management import get_compaction_options, drop_table, sync_table, get_table_settings from cassandra.cqlengine.models import Model @@ -38,8 +38,7 @@ def assert_option_fails(self, key): key = "__compaction_{}__".format(key) - with patch.object(self.model, key, 10), \ - self.assertRaises(CQLEngineException): + with patch.object(self.model, key, 10), self.assertRaises(CQLEngineException): get_compaction_options(self.model) @@ -97,7 +96,6 @@ class LeveledcompactionTestTable(Model): user_id = columns.UUID(primary_key=True) name = columns.Text() -from cassandra.cqlengine.management import schema_columnfamilies class AlterTableTest(BaseCassEngTestCase): @@ -158,7 +156,6 @@ def test_alter_actually_alters(self): self.assertRegexpMatches(table_settings.options['compaction_strategy_class'], '.*SizeTieredCompactionStrategy$') - def 
test_alter_options(self): class AlterTable(Model): @@ -175,7 +172,6 @@ class AlterTable(Model): sync_table(AlterTable) - class EmptyCompactionTest(BaseCassEngTestCase): def test_empty_compaction(self): class EmptyCompactionModel(Model): @@ -202,7 +198,6 @@ class CompactionSizeTieredModel(Model): name = columns.Text() - class OptionsTest(BaseCassEngTestCase): def test_all_size_tiered_options(self): @@ -232,7 +227,6 @@ class AllSizeTieredOptionsModel(Model): self.assertDictEqual(options, expected) - def test_all_leveled_options(self): class AllLeveledOptionsModel(Model): @@ -250,4 +244,3 @@ class AllLeveledOptionsModel(Model): options = json.loads(settings['compaction_strategy_options']) self.assertDictEqual(options, {u'sstable_size_in_mb': u'64'}) - diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index f209af35f9..97608f6e36 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -19,9 +19,9 @@ from cassandra.cqlengine import CACHING_ALL, CACHING_NONE from cassandra.cqlengine.connection import get_session, get_cluster -from cassandra.cqlengine.exceptions import CQLEngineException +from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine import management -from cassandra.cqlengine.management import get_fields, sync_table, drop_table +from cassandra.cqlengine.management import get_fields, sync_table, drop_table from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy @@ -29,6 +29,7 @@ from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine.query.test_queryset import TestModel + class KeyspaceManagementTest(BaseCassEngTestCase): def test_create_drop_succeeeds(self): cluster = get_cluster() @@ -71,18 +72,21 @@ def test_multiple_deletes_dont_fail(self): 
drop_table(TestModel) drop_table(TestModel) + class LowercaseKeyModel(Model): first_key = columns.Integer(primary_key=True) second_key = columns.Integer(primary_key=True) some_data = columns.Text() + class CapitalizedKeyModel(Model): firstKey = columns.Integer(primary_key=True) secondKey = columns.Integer(primary_key=True) someData = columns.Text() + class PrimaryKeysOnlyModel(Model): __compaction__ = LeveledCompactionStrategy @@ -109,6 +113,7 @@ class FirstModel(Model): second_key = columns.UUID() third_key = columns.Text() + class SecondModel(Model): __table_name__ = 'first_model' @@ -117,6 +122,7 @@ class SecondModel(Model): third_key = columns.Text() fourth_key = columns.Text() + class ThirdModel(Model): __table_name__ = 'first_model' @@ -126,6 +132,7 @@ class ThirdModel(Model): # removed fourth key, but it should stay in the DB blah = columns.Map(columns.Text, columns.Text) + class FourthModel(Model): __table_name__ = 'first_model' @@ -135,6 +142,7 @@ class FourthModel(Model): # removed fourth key, but it should stay in the DB renamed = columns.Map(columns.Text, columns.Text, db_field='blah') + class AddColumnTest(BaseCassEngTestCase): def setUp(self): drop_table(FirstModel) @@ -180,6 +188,7 @@ class ModelWithTableProperties(Model): ModelWithTableProperties.__index_interval__ = 98706 ModelWithTableProperties.__default_time_to_live__ = 4756 + class TablePropertiesTests(BaseCassEngTestCase): def setUp(self): @@ -233,17 +242,17 @@ def test_table_property_update(self): table_settings = management.get_table_settings(ModelWithTableProperties).options expected = {'bloom_filter_fp_chance': 0.66778, - 'comment': 'xirAkRWZVVvsmzRvXamiEcQkshkUIDINVJZgLYSdnGHweiBrAiJdLJkVohdRy', - 'gc_grace_seconds': 96362, - 'read_repair_chance': 0.2989, - #'local_read_repair_chance': 0.12732, + 'comment': 'xirAkRWZVVvsmzRvXamiEcQkshkUIDINVJZgLYSdnGHweiBrAiJdLJkVohdRy', + 'gc_grace_seconds': 96362, + 'read_repair_chance': 0.2989, + # 'local_read_repair_chance': 0.12732, } if 
CASSANDRA_VERSION >= '2.0.0': - expected['memtable_flush_period_in_ms'] = 60210 - expected['default_time_to_live'] = 65178 + expected['memtable_flush_period_in_ms'] = 60210 + expected['default_time_to_live'] = 65178 if CASSANDRA_VERSION == '2.0.0': - expected['index_interval'] = 94207 + expected['index_interval'] = 94207 # these featuers removed in cassandra 2.1 if CASSANDRA_VERSION <= '2.0.0': @@ -274,9 +283,7 @@ def test_sync_table_works_with_primary_keys_only_tables(self): assert LeveledCompactionStrategy in table_settings.options['compaction_strategy_class'] - # Now we are "updating" the table: - # setting up something to change PrimaryKeysOnlyModel.__compaction__ = SizeTieredCompactionStrategy @@ -290,6 +297,7 @@ def test_sync_table_works_with_primary_keys_only_tables(self): table_settings = management.get_table_settings(PrimaryKeysOnlyModel) assert SizeTieredCompactionStrategy in table_settings.options['compaction_strategy_class'] + class NonModelFailureTest(BaseCassEngTestCase): class FakeModel(object): pass @@ -324,9 +332,4 @@ class StaticModel(Model): sync_table(StaticModel) assert len(m2.call_args_list) == 1 - assert "ALTER" not in m2.call_args[0][0].query_string - - - - - + assert "ALTER" not in m2.call_args[0][0].query_string diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index b38672d388..621c6c6638 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -16,7 +16,7 @@ import random from datetime import date from operator import itemgetter -from cassandra.cqlengine.exceptions import CQLEngineException +from cassandra.cqlengine import CQLEngineException from tests.integration.cqlengine.base import BaseCassEngTestCase from cassandra.cqlengine.management import sync_table @@ -24,16 +24,10 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns -class TestModel(Model): - - id = 
columns.UUID(primary_key=True, default=lambda:uuid4()) - count = columns.Integer() - text = columns.Text(required=False) - a_bool = columns.Boolean(default=False) class TestModel(Model): - id = columns.UUID(primary_key=True, default=lambda:uuid4()) + id = columns.UUID(primary_key=True, default=lambda: uuid4()) count = columns.Integer() text = columns.Text(required=False) a_bool = columns.Boolean(default=False) @@ -153,7 +147,7 @@ def tearDownClass(cls): drop_table(TestMultiKeyModel) def test_deleting_only_deletes_one_object(self): - partition = random.randint(0,1000) + partition = random.randint(0, 1000) for i in range(5): TestMultiKeyModel.create(partition=partition, cluster=i, count=i, text=str(i)) @@ -262,17 +256,20 @@ class IndexDefinitionModel(Model): key = columns.UUID(primary_key=True) val = columns.Text(index=True) + class TestIndexedColumnDefinition(BaseCassEngTestCase): def test_exception_isnt_raised_if_an_index_is_defined_more_than_once(self): sync_table(IndexDefinitionModel) sync_table(IndexDefinitionModel) + class ReservedWordModel(Model): token = columns.Text(primary_key=True) insert = columns.Integer(index=True) + class TestQueryQuoting(BaseCassEngTestCase): def test_reserved_cql_words_can_be_used_as_column_names(self): @@ -323,6 +320,7 @@ def test_query_with_date(self): assert inst.test_id == uid assert inst.date == day + def test_none_filter_fails(): class NoneFilterModel(Model): @@ -335,6 +333,3 @@ class NoneFilterModel(Model): raise Exception("fail") except CQLEngineException as e: pass - - - diff --git a/tests/integration/cqlengine/model/test_updates.py b/tests/integration/cqlengine/model/test_updates.py index 71bc989d92..ff8d8ecf42 100644 --- a/tests/integration/cqlengine/model/test_updates.py +++ b/tests/integration/cqlengine/model/test_updates.py @@ -15,7 +15,7 @@ from uuid import uuid4 from mock import patch -from cassandra.cqlengine.exceptions import ValidationError +from cassandra.cqlengine import ValidationError from 
tests.integration.cqlengine.base import BaseCassEngTestCase from cassandra.cqlengine.models import Model @@ -102,4 +102,3 @@ def test_primary_key_update_failure(self): m0 = TestUpdateModel.create(count=5, text='monkey') with self.assertRaises(ValidationError): m0.update(partition=uuid4()) - diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index 8c5fb95c31..4efb79950a 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -13,7 +13,7 @@ # limitations under the License. from uuid import uuid4 -from cassandra.cqlengine.exceptions import ValidationError +from cassandra.cqlengine import ValidationError from cassandra.cqlengine.query import QueryException from tests.integration.cqlengine.base import BaseCassEngTestCase From fa49e9e26ea0e157051d62ee0c393e2b7c8193af Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 17 Mar 2015 11:22:27 -0500 Subject: [PATCH 1269/3726] assertEquals --> assertEqual in cqlengine (deprecated) --- .../model/test_class_construction.py | 12 +++++------ .../cqlengine/model/test_model_io.py | 12 +++++------ .../cqlengine/model/test_value_lists.py | 4 ++-- .../cqlengine/query/test_queryoperators.py | 4 ++-- .../integration/cqlengine/test_ifnotexists.py | 20 +++++++++---------- 5 files changed, 26 insertions(+), 26 deletions(-) diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index 141f6f625b..9b4d479f36 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -61,8 +61,8 @@ class WildDBNames(Model): numbers = columns.Integer(db_field='integers_etc') db_map = WildDBNames._db_map - self.assertEquals(db_map['words_and_whatnot'], 'content') - self.assertEquals(db_map['integers_etc'], 'numbers') + self.assertEqual(db_map['words_and_whatnot'], 
'content') + self.assertEqual(db_map['integers_etc'], 'numbers') def test_attempting_to_make_duplicate_column_names_fails(self): """ @@ -86,7 +86,7 @@ class Stuff(Model): content = columns.Text() numbers = columns.Integer() - self.assertEquals([x for x in Stuff._columns.keys()], ['id', 'words', 'content', 'numbers']) + self.assertEqual([x for x in Stuff._columns.keys()], ['id', 'words', 'content', 'numbers']) def test_exception_raised_when_creating_class_without_pk(self): with self.assertRaises(ModelDefinitionException): @@ -109,8 +109,8 @@ class Stuff(Model): inst2 = Stuff(num=7) self.assertNotEquals(inst1.num, inst2.num) - self.assertEquals(inst1.num, 5) - self.assertEquals(inst2.num, 7) + self.assertEqual(inst1.num, 5) + self.assertEqual(inst2.num, 7) def test_superclass_fields_are_inherited(self): """ @@ -183,7 +183,7 @@ class ModelWithPartitionKeys(Model): self.assertTrue(cols['p2'].partition_key) obj = ModelWithPartitionKeys(p1='a', p2='b') - self.assertEquals(obj.pk, ('a', 'b')) + self.assertEqual(obj.pk, ('a', 'b')) def test_del_attribute_is_assigned_properly(self): """ Tests that columns that can be deleted have the del attribute """ diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 621c6c6638..b9f3936f19 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -56,7 +56,7 @@ def test_model_save_and_load(self): self.assertIsInstance(tm2, TestModel) for cname in tm._columns.keys(): - self.assertEquals(getattr(tm, cname), getattr(tm2, cname)) + self.assertEqual(getattr(tm, cname), getattr(tm2, cname)) def test_model_read_as_dict(self): """ @@ -69,13 +69,13 @@ def test_model_read_as_dict(self): 'text': tm.text, 'a_bool': tm.a_bool, } - self.assertEquals(sorted(tm.keys()), sorted(column_dict.keys())) + self.assertEqual(sorted(tm.keys()), sorted(column_dict.keys())) self.assertItemsEqual(tm.values(), column_dict.values()) - 
self.assertEquals( + self.assertEqual( sorted(tm.items(), key=itemgetter(0)), sorted(column_dict.items(), key=itemgetter(0))) - self.assertEquals(len(tm), len(column_dict)) + self.assertEqual(len(tm), len(column_dict)) for column_id in column_dict.keys(): self.assertEqual(tm[column_id], column_dict[column_id]) @@ -93,8 +93,8 @@ def test_model_updating_works_properly(self): tm.save() tm2 = TestModel.objects(id=tm.pk).first() - self.assertEquals(tm.count, tm2.count) - self.assertEquals(tm.a_bool, tm2.a_bool) + self.assertEqual(tm.count, tm2.count) + self.assertEqual(tm.a_bool, tm2.a_bool) def test_model_deleting_works_properly(self): """ diff --git a/tests/integration/cqlengine/model/test_value_lists.py b/tests/integration/cqlengine/model/test_value_lists.py index c3ddc28ea4..28b5a0d808 100644 --- a/tests/integration/cqlengine/model/test_value_lists.py +++ b/tests/integration/cqlengine/model/test_value_lists.py @@ -55,7 +55,7 @@ def test_clustering_order(self): values = list(TestModel.objects.values_list('clustering_key', flat=True)) # [19L, 18L, 17L, 16L, 15L, 14L, 13L, 12L, 11L, 10L, 9L, 8L, 7L, 6L, 5L, 4L, 3L, 2L, 1L, 0L] - self.assertEquals(values, sorted(items, reverse=True)) + self.assertEqual(values, sorted(items, reverse=True)) def test_clustering_order_more_complex(self): """ @@ -70,6 +70,6 @@ def test_clustering_order_more_complex(self): values = list(TestClusteringComplexModel.objects.values_list('some_value', flat=True)) - self.assertEquals([2] * 20, values) + self.assertEqual([2] * 20, values) drop_table(TestClusteringComplexModel) diff --git a/tests/integration/cqlengine/query/test_queryoperators.py b/tests/integration/cqlengine/query/test_queryoperators.py index 02774cf2f3..4e28090672 100644 --- a/tests/integration/cqlengine/query/test_queryoperators.py +++ b/tests/integration/cqlengine/query/test_queryoperators.py @@ -99,7 +99,7 @@ class TestModel(Model): q = TestModel.objects.filter(pk__token__gt=func) where = q._where[0] where.set_context_id(1) - 
self.assertEquals(str(where), 'token("p1", "p2") > token(%({})s, %({})s)'.format(1, 2)) + self.assertEqual(str(where), 'token("p1", "p2") > token(%({})s, %({})s)'.format(1, 2)) # Verify that a SELECT query can be successfully generated str(q._select_query()) @@ -111,7 +111,7 @@ class TestModel(Model): q = TestModel.objects.filter(pk__token__gt=func) where = q._where[0] where.set_context_id(1) - self.assertEquals(str(where), 'token("p1", "p2") > token(%({})s, %({})s)'.format(1, 2)) + self.assertEqual(str(where), 'token("p1", "p2") > token(%({})s, %({})s)'.format(1, 2)) str(q._select_query()) # The 'pk__token' virtual column may only be compared to a Token diff --git a/tests/integration/cqlengine/test_ifnotexists.py b/tests/integration/cqlengine/test_ifnotexists.py index 9cc4d3c556..98b192a5d7 100644 --- a/tests/integration/cqlengine/test_ifnotexists.py +++ b/tests/integration/cqlengine/test_ifnotexists.py @@ -85,8 +85,8 @@ def test_insert_if_not_exists_success(self): self.assertEqual(len(q), 1) tm = q.first() - self.assertEquals(tm.count, 8) - self.assertEquals(tm.text, '123456789') + self.assertEqual(tm.count, 8) + self.assertEqual(tm.text, '123456789') def test_insert_if_not_exists_failure(self): """ tests that insertion with if_not_exists failure """ @@ -97,11 +97,11 @@ def test_insert_if_not_exists_failure(self): TestIfNotExistsModel.create(id=id, count=9, text='111111111111') q = TestIfNotExistsModel.objects(id=id) - self.assertEquals(len(q), 1) + self.assertEqual(len(q), 1) tm = q.first() - self.assertEquals(tm.count, 9) - self.assertEquals(tm.text, '111111111111') + self.assertEqual(tm.count, 9) + self.assertEqual(tm.text, '111111111111') @skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_batch_insert_if_not_exists_success(self): @@ -121,8 +121,8 @@ def test_batch_insert_if_not_exists_success(self): self.assertEqual(len(q), 1) tm = q.first() - self.assertEquals(tm.count, 8) - self.assertEquals(tm.text, '123456789') + 
self.assertEqual(tm.count, 8) + self.assertEqual(tm.text, '123456789') def test_batch_insert_if_not_exists_failure(self): """ tests that batch insertion with if_not_exists failure """ @@ -134,11 +134,11 @@ def test_batch_insert_if_not_exists_failure(self): TestIfNotExistsModel.batch(b).create(id=id, count=9, text='111111111111') q = TestIfNotExistsModel.objects(id=id) - self.assertEquals(len(q), 1) + self.assertEqual(len(q), 1) tm = q.first() - self.assertEquals(tm.count, 9) - self.assertEquals(tm.text, '111111111111') + self.assertEqual(tm.count, 9) + self.assertEqual(tm.text, '111111111111') class IfNotExistsModelTest(BaseIfNotExistsTest): From 1df92a9ea5a6544840462717b9cf65dd0b196de4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 17 Mar 2015 11:24:46 -0500 Subject: [PATCH 1270/3726] assertNotEquals --> assertNotEqual in cqlengine (deprecated) --- tests/integration/cqlengine/model/test_class_construction.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index 9b4d479f36..a267e596a3 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -108,7 +108,7 @@ class Stuff(Model): inst1 = Stuff(num=5) inst2 = Stuff(num=7) - self.assertNotEquals(inst1.num, inst2.num) + self.assertNotEqual(inst1.num, inst2.num) self.assertEqual(inst1.num, 5) self.assertEqual(inst2.num, 7) From 527df352e530e76b85a6f51544c9ef074904d1fd Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 17 Mar 2015 12:01:59 -0500 Subject: [PATCH 1271/3726] log.warn --> log.warning in cqlengine (deprecated) --- cassandra/cqlengine/columns.py | 2 +- cassandra/cqlengine/connection.py | 6 +++--- cassandra/cqlengine/management.py | 6 +++--- cassandra/cqlengine/models.py | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cassandra/cqlengine/columns.py 
b/cassandra/cqlengine/columns.py index a6b1c6c5e6..1491b777db 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -185,7 +185,7 @@ def __init__(self, if polymorphic_key: msg = "polymorphic_key is deprecated. Use discriminator_column instead." warnings.warn(msg, DeprecationWarning) - log.warn(msg) + log.warning(msg) self.discriminator_column = discriminator_column or polymorphic_key self.polymorphic_key = self.discriminator_column diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 53a5ec3a48..7b1ffcf9b5 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -54,7 +54,7 @@ def default(): global cluster, session if session: - log.warn("configuring new connection for cqlengine when one was already set") + log.warning("configuring new connection for cqlengine when one was already set") cluster = Cluster() session = cluster.connect() @@ -75,7 +75,7 @@ def set_session(s): global cluster, session if session: - log.warn("configuring new connection for cqlengine when one was already set") + log.warning("configuring new connection for cqlengine when one was already set") if s.row_factory is not dict_factory: raise CQLEngineException("Failed to initialize: 'Session.row_factory' must be 'dict_factory'.") @@ -130,7 +130,7 @@ def setup( log.debug("cqlengine connection initialized with internally created session") except NoHostAvailable: if retry_connect: - log.warn("connect failed, setting up for re-attempt on first use") + log.warning("connect failed, setting up for re-attempt on first use") kwargs['default_keyspace'] = default_keyspace kwargs['consistency'] = consistency kwargs['lazy_connect'] = False diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 66016f482d..5b6767ad64 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -61,7 +61,7 @@ def create_keyspace(name, strategy_class, replication_factor, 
durable_writes=Tru msg = "Deprecated. Use create_keyspace_simple or create_keyspace_network_topology instead" warnings.warn(msg, DeprecationWarning) - log.warn(msg) + log.warning(msg) cluster = get_cluster() @@ -143,7 +143,7 @@ def _create_keyspace(name, durable_writes, strategy_class, strategy_options): def delete_keyspace(name): msg = "Deprecated. Use drop_keyspace instead" warnings.warn(msg, DeprecationWarning) - log.warn(msg) + log.warning(msg) drop_keyspace(name) @@ -532,6 +532,6 @@ def _allow_schema_modification(): if not os.getenv(CQLENG_ALLOW_SCHEMA_MANAGEMENT): msg = CQLENG_ALLOW_SCHEMA_MANAGEMENT + " environment variable is not set. Future versions of this package will require this variable to enable management functions." warnings.warn(msg) - log.warn(msg) + log.warning(msg) return True diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index a27028d272..e0b13c2251 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -773,7 +773,7 @@ def __new__(cls, name, bases, attrs): if poly_key: msg = '__polymorphic_key__ is deprecated. Use __discriminator_value__ instead' warnings.warn(msg, DeprecationWarning) - log.warn(msg) + log.warning(msg) attrs['__discriminator_value__'] = attrs.get('__discriminator_value__', poly_key) attrs['__polymorphic_key__'] = attrs['__discriminator_value__'] From addbf10547a82628c6c641050a225953624feca3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 17 Mar 2015 12:03:28 -0500 Subject: [PATCH 1272/3726] log.warn --> log.warning in cqltypes.py (deprecated) --- cassandra/cqltypes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 065d313276..e51e774ece 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -76,14 +76,14 @@ def trim_if_startswith(s, prefix): def unix_time_from_uuid1(u): msg = "'cassandra.cqltypes.unix_time_from_uuid1' has moved to 'cassandra.util'. 
This entry point will be removed in the next major version." warnings.warn(msg, DeprecationWarning) - log.warn(msg) + log.warning(msg) return util.unix_time_from_uuid1(u) def datetime_from_timestamp(timestamp): msg = "'cassandra.cqltypes.datetime_from_timestamp' has moved to 'cassandra.util'. This entry point will be removed in the next major version." warnings.warn(msg, DeprecationWarning) - log.warn(msg) + log.warning(msg) return util.datetime_from_timestamp(timestamp) From 35f1fbbe39012e42804bca20f45dd9e973bd2baa Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 17 Mar 2015 12:04:34 -0500 Subject: [PATCH 1273/3726] test init: select from system.local instead of setting keyspace --- tests/integration/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index e68afc96c6..5d976ac5ff 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -63,8 +63,7 @@ def get_server_versions(): c = Cluster() s = c.connect() - s.set_keyspace('system') - row = s.execute('SELECT cql_version, release_version FROM local')[0] + row = s.execute('SELECT cql_version, release_version FROM system.local')[0] cass_version = _tuple_version(row.release_version) cql_version = _tuple_version(row.cql_version) From 0f6e2bc408564829121f7302211f70af3a495d67 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 17 Mar 2015 12:04:39 -0500 Subject: [PATCH 1274/3726] assertDictContainsSubset is deprecated --- tests/integration/cqlengine/management/test_management.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 97608f6e36..99b34c5cf0 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -219,7 +219,9 @@ def test_set_table_properties(self): 
expected['default_time_to_live'] = 4756 expected['memtable_flush_period_in_ms'] = 43681 - self.assertDictContainsSubset(expected, management.get_table_settings(ModelWithTableProperties).options) + options = management.get_table_settings(ModelWithTableProperties).options + self.assertEqual(dict([(k, options.get(k)) for k in expected.keys()]), + expected) def test_table_property_update(self): ModelWithTableProperties.__bloom_filter_fp_chance__ = 0.66778 @@ -260,7 +262,8 @@ def test_table_property_update(self): expected['replicate_on_write'] = True expected['populate_io_cache_on_flush'] = False - self.assertDictContainsSubset(expected, table_settings) + self.assertEqual(dict([(k, table_settings.get(k)) for k in expected.keys()]), + expected) class SyncTableTests(BaseCassEngTestCase): From 98f466bdad70e4a376aaa6e608488dfd76f229c0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 17 Mar 2015 13:08:15 -0500 Subject: [PATCH 1275/3726] Add tests for inherited model 'discriminator' attributes --- .../cqlengine/model/test_polymorphism.py | 233 +++++++++++++++++- 1 file changed, 230 insertions(+), 3 deletions(-) diff --git a/tests/integration/cqlengine/model/test_polymorphism.py b/tests/integration/cqlengine/model/test_polymorphism.py index cf4a72e7a8..6bd835c205 100644 --- a/tests/integration/cqlengine/model/test_polymorphism.py +++ b/tests/integration/cqlengine/model/test_polymorphism.py @@ -153,9 +153,6 @@ def test_delete_on_polymorphic_subclass_does_not_include_polymorphic_key(self): self.assertNotIn("row_type", m.call_args[0][0].query_string) - - - class UnindexedPolyBase(models.Model): partition = columns.UUID(primary_key=True, default=uuid.uuid4) @@ -202,6 +199,7 @@ def test_non_conflicting_type_results_work(self): p1, p2, p3 = self.p1, self.p2, self.p3 assert len(list(UnindexedPoly1.objects(partition=p1.partition, cluster=p1.cluster))) == 1 assert len(list(UnindexedPoly2.objects(partition=p1.partition, cluster=p2.cluster))) == 1 + assert 
len(list(UnindexedPoly3.objects(partition=p1.partition, cluster=p3.cluster))) == 1 def test_subclassed_model_results_work_properly(self): p1, p2, p3 = self.p1, self.p2, self.p3 @@ -253,3 +251,232 @@ def test_success_case(self): assert len(list(IndexedPoly2.objects(partition=self.p1.partition))) == 1 +######### +# Repeated tests for 'discriminator' properties, following deprecation of polymorphic variants +######### +class TestInheritanceClassConstruction(BaseCassEngTestCase): + + def test_multiple_discriminator_value_failure(self): + """ Tests that defining a model with more than one discriminator column fails """ + with self.assertRaises(models.ModelDefinitionException): + class M(models.Model): + partition = columns.Integer(primary_key=True) + type1 = columns.Integer(discriminator_column=True) + type2 = columns.Integer(discriminator_column=True) + + def test_discriminator_value_inheritance(self): + """ Tests that discriminator_column attribute is not inherited """ + class Base(models.Model): + + partition = columns.Integer(primary_key=True) + type1 = columns.Integer(discriminator_column=True) + + class M1(Base): + __discriminator_value__ = 1 + + class M2(M1): + pass + + assert M2.__discriminator_value__ is None + + def test_inheritance_metaclass(self): + """ Tests that the model meta class configures inherited models properly """ + class Base(models.Model): + + partition = columns.Integer(primary_key=True) + type1 = columns.Integer(discriminator_column=True) + + class M1(Base): + __discriminator_value__ = 1 + + assert Base._is_polymorphic + assert M1._is_polymorphic + + assert Base._is_polymorphic_base + assert not M1._is_polymorphic_base + + assert Base._discriminator_column is Base._columns['type1'] + assert M1._discriminator_column is M1._columns['type1'] + + assert Base._discriminator_column_name == 'type1' + assert M1._discriminator_column_name == 'type1' + + def test_table_names_are_inherited_from_base(self): + class Base(models.Model): + + partition = 
columns.Integer(primary_key=True) + type1 = columns.Integer(discriminator_column=True) + + class M1(Base): + __discriminator_value__ = 1 + + assert Base.column_family_name() == M1.column_family_name() + + def test_collection_columns_cant_be_discriminator_column(self): + with self.assertRaises(models.ModelDefinitionException): + class Base(models.Model): + + partition = columns.Integer(primary_key=True) + type1 = columns.Set(columns.Integer, discriminator_column=True) + + +class InheritBase(models.Model): + + partition = columns.UUID(primary_key=True, default=uuid.uuid4) + row_type = columns.Integer(discriminator_column=True) + + +class Inherit1(InheritBase): + __discriminator_value__ = 1 + data1 = columns.Text() + + +class Inherit2(InheritBase): + __discriminator_value__ = 2 + data2 = columns.Text() + + +class TestInheritanceModel(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestInheritanceModel, cls).setUpClass() + management.sync_table(Inherit1) + management.sync_table(Inherit2) + + @classmethod + def tearDownClass(cls): + super(TestInheritanceModel, cls).tearDownClass() + management.drop_table(Inherit1) + management.drop_table(Inherit2) + + def test_saving_base_model_fails(self): + with self.assertRaises(models.PolymorphicModelException): + InheritBase.create() + + def test_saving_subclass_saves_disc_value(self): + p1 = Inherit1.create(data1='pickle') + p2 = Inherit2.create(data2='bacon') + + assert p1.row_type == Inherit1.__discriminator_value__ + assert p2.row_type == Inherit2.__discriminator_value__ + + def test_query_deserialization(self): + p1 = Inherit1.create(data1='pickle') + p2 = Inherit2.create(data2='bacon') + + p1r = InheritBase.get(partition=p1.partition) + p2r = InheritBase.get(partition=p2.partition) + + assert isinstance(p1r, Inherit1) + assert isinstance(p2r, Inherit2) + + def test_delete_on_subclass_does_not_include_disc_value(self): + p1 = Inherit1.create() + session = get_session() + with mock.patch.object(session, 
'execute') as m: + Inherit1.objects(partition=p1.partition).delete() + + # make sure our discriminator value isn't in the CQL + # not sure how we would even get here if it was in there + # since the CQL would fail. + + self.assertNotIn("row_type", m.call_args[0][0].query_string) + + +class UnindexedInheritBase(models.Model): + + partition = columns.UUID(primary_key=True, default=uuid.uuid4) + cluster = columns.UUID(primary_key=True, default=uuid.uuid4) + row_type = columns.Integer(discriminator_column=True) + + +class UnindexedInherit1(UnindexedInheritBase): + __discriminator_value__ = 1 + data1 = columns.Text() + + +class UnindexedInherit2(UnindexedInheritBase): + __discriminator_value__ = 2 + data2 = columns.Text() + + +class UnindexedInherit3(UnindexedInherit2): + __discriminator_value__ = 3 + data3 = columns.Text() + + +class TestUnindexedInheritanceQuery(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestUnindexedInheritanceQuery, cls).setUpClass() + management.sync_table(UnindexedInherit1) + management.sync_table(UnindexedInherit2) + management.sync_table(UnindexedInherit3) + + cls.p1 = UnindexedInherit1.create(data1='pickle') + cls.p2 = UnindexedInherit2.create(partition=cls.p1.partition, data2='bacon') + cls.p3 = UnindexedInherit3.create(partition=cls.p1.partition, data3='turkey') + + @classmethod + def tearDownClass(cls): + super(TestUnindexedInheritanceQuery, cls).tearDownClass() + management.drop_table(UnindexedInherit1) + management.drop_table(UnindexedInherit2) + management.drop_table(UnindexedInherit3) + + def test_non_conflicting_type_results_work(self): + p1, p2, p3 = self.p1, self.p2, self.p3 + assert len(list(UnindexedInherit1.objects(partition=p1.partition, cluster=p1.cluster))) == 1 + assert len(list(UnindexedInherit2.objects(partition=p1.partition, cluster=p2.cluster))) == 1 + assert len(list(UnindexedInherit3.objects(partition=p1.partition, cluster=p3.cluster))) == 1 + + def 
test_subclassed_model_results_work_properly(self): + p1, p2, p3 = self.p1, self.p2, self.p3 + assert len(list(UnindexedInherit2.objects(partition=p1.partition, cluster__in=[p2.cluster, p3.cluster]))) == 2 + + def test_conflicting_type_results(self): + with self.assertRaises(models.PolymorphicModelException): + list(UnindexedInherit1.objects(partition=self.p1.partition)) + with self.assertRaises(models.PolymorphicModelException): + list(UnindexedInherit2.objects(partition=self.p1.partition)) + + +class IndexedInheritBase(models.Model): + + partition = columns.UUID(primary_key=True, default=uuid.uuid4) + cluster = columns.UUID(primary_key=True, default=uuid.uuid4) + row_type = columns.Integer(discriminator_column=True, index=True) + + +class IndexedInherit1(IndexedInheritBase): + __discriminator_value__ = 1 + data1 = columns.Text() + + +class IndexedInherit2(IndexedInheritBase): + __discriminator_value__ = 2 + data2 = columns.Text() + + +class TestIndexedInheritanceQuery(BaseCassEngTestCase): + + @classmethod + def setUpClass(cls): + super(TestIndexedInheritanceQuery, cls).setUpClass() + management.sync_table(IndexedInherit1) + management.sync_table(IndexedInherit2) + + cls.p1 = IndexedInherit1.create(data1='pickle') + cls.p2 = IndexedInherit2.create(partition=cls.p1.partition, data2='bacon') + + @classmethod + def tearDownClass(cls): + super(TestIndexedInheritanceQuery, cls).tearDownClass() + management.drop_table(IndexedInherit1) + management.drop_table(IndexedInherit2) + + def test_success_case(self): + assert len(list(IndexedInherit1.objects(partition=self.p1.partition))) == 1 + assert len(list(IndexedInherit2.objects(partition=self.p1.partition))) == 1 From b39daa128e3fe9d84c8c8cfc11f82c7524ae4a34 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 17 Mar 2015 16:03:20 -0500 Subject: [PATCH 1276/3726] Add warning when list prepend is used. 
--- cassandra/cqlengine/statements.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 234150819f..f64cc7a0de 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -13,13 +13,17 @@ # limitations under the License. from datetime import datetime, timedelta +import logging import time import six +import warnings from cassandra.cqlengine import UnicodeMixin from cassandra.cqlengine.functions import QueryValue from cassandra.cqlengine.operators import BaseWhereOperator, InOperator +log = logging.getLogger(__name__) + class StatementException(Exception): pass @@ -291,6 +295,11 @@ def update_context(self, ctx): ctx[str(ctx_id)] = self._to_database(self._assignments) ctx_id += 1 if self._prepend is not None: + msg = "Previous versions of cqlengine implicitly reversed prepended lists to account for CASSANDRA-8733. " \ + "THIS VERSION DOES NOT. This warning will be removed in a future release." 
+ warnings.warn(msg) + log.warning(msg) + ctx[str(ctx_id)] = self._to_database(self._prepend) ctx_id += 1 if self._append is not None: From e1dbefcdac297cc6112a47443a40b4d6eb7bd75f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 18 Mar 2015 09:20:45 -0500 Subject: [PATCH 1277/3726] Raise exception for models defining discriminator value, but no column --- cassandra/cqlengine/models.py | 3 +++ tests/integration/cqlengine/model/test_polymorphism.py | 10 ++++++++++ 2 files changed, 13 insertions(+) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index e0b13c2251..e56220babb 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -796,6 +796,9 @@ def _transform_column(col_name, col_obj): if len(discriminator_columns) > 1: raise ModelDefinitionException('only one discriminator_column (polymorphic_key (deprecated)) can be defined in a model, {} found'.format(len(discriminator_columns))) + if attrs['__discriminator_value__'] and not is_polymorphic: + raise ModelDefinitionException('__discriminator_value__ specified, but no base columns defined with discriminator_column=True') + discriminator_column_name, discriminator_column = discriminator_columns[0] if discriminator_columns else (None, None) if isinstance(discriminator_column, (columns.BaseContainerColumn, columns.Counter)): diff --git a/tests/integration/cqlengine/model/test_polymorphism.py b/tests/integration/cqlengine/model/test_polymorphism.py index 6bd835c205..431237d2de 100644 --- a/tests/integration/cqlengine/model/test_polymorphism.py +++ b/tests/integration/cqlengine/model/test_polymorphism.py @@ -33,6 +33,11 @@ class M(models.Model): type1 = columns.Integer(polymorphic_key=True) type2 = columns.Integer(polymorphic_key=True) + def test_no_polymorphic_key_column_failure(self): + with self.assertRaises(models.ModelDefinitionException): + class M(models.Model): + __polymorphic_key__ = 1 + def test_polymorphic_key_inheritance(self): """ Tests that 
polymorphic_key attribute is not inherited """ class Base(models.Model): @@ -264,6 +269,11 @@ class M(models.Model): type1 = columns.Integer(discriminator_column=True) type2 = columns.Integer(discriminator_column=True) + def test_no_discriminator_column_failure(self): + with self.assertRaises(models.ModelDefinitionException): + class M(models.Model): + __discriminator_value__ = 1 + def test_discriminator_value_inheritance(self): """ Tests that discriminator_column attribute is not inherited """ class Base(models.Model): From 2bbe76e59b2c17e56b87d64b645f69cba0c81f81 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 18 Mar 2015 11:13:42 -0500 Subject: [PATCH 1278/3726] Add an upgrade guide for cqlengine --- docs/cqlengine/upgrade_guide.rst | 136 +++++++++++++++++++++++++++++++ docs/object_mapper.rst | 4 + 2 files changed, 140 insertions(+) create mode 100644 docs/cqlengine/upgrade_guide.rst diff --git a/docs/cqlengine/upgrade_guide.rst b/docs/cqlengine/upgrade_guide.rst new file mode 100644 index 0000000000..749106de5b --- /dev/null +++ b/docs/cqlengine/upgrade_guide.rst @@ -0,0 +1,136 @@ +======================== +Upgrade Guide +======================== + +This is an overview of things that changed as the cqlengine project was merged into +cassandra-driver. While efforts were taken to preserve the API and most functionality exactly, +conversion to this package will still require certain minimal updates (namely, imports). + +**THERE IS ONE FUNCTIONAL CHANGE**, described in the first section below. + +Functional Change +================= +Legacy cqlengine included a workaround for a Cassandra bug in which prepended list segments were +reversed (`CASSANDRA-8733 `_). As of +this integration, this workaround is removed. The first released integrated version emits +a warning when prepend is used. Subsequent versions will have this warning removed. 
+ +Remove cqlengine +================ +To avoid confusion or mistakes using the legacy package in your application, it +is prudent to remove the cqlengine package when upgrading to the integrated version. + +The driver setup script will warn if the legacy package is detected during install, +but it will not prevent side-by-side installation. + +Organization +============ +Imports +------- +cqlengine is now integrated as a sub-package of the driver base package 'cassandra'. +Upgrading will require adjusting imports to cqlengine. For example:: + + from cqlengine import columns + +is now:: + + from cassandra.cqlengine import columns + +Package-Level Aliases +--------------------- +Legacy cqlengine defined a number of aliases at the package level, which became redundant +when the package was integrated for a driver. These have been removed in favor of absolute +imports, and referring to cannonical definitions. For example, ``cqlengine.ONE`` was an alias +of ``cassandra.ConsistencyLevel.ONE``. In the integrated package, only the +:class:`cassandra.ConsistencyLevel` remains. + +Additionally, submodule aliases are removed from cqlengine in favor of absolute imports. + +These aliases are removed, and not deprecated because they should be straightforward to iron out +at module load time. + +Exceptions +---------- +The legacy cqlengine.exceptions module had a number of Exception classes that were variously +common to the package, or only used in specific modules. Common exceptions were relocated to +cqlengine, and specialized exceptions were placed in the module that raises them. 
Below is a +listing of updated locations: + +============================ ========== +Exception class New module +============================ ========== +CQLEngineException cassandra.cqlengine +ModelException cassandra.cqlengine.models +ValidationError cassandra.cqlengine +UndefinedKeyspaceException cassandra.cqlengine.connection +LWTException cassandra.cqlengine.query +IfNotExistsWithCounterColumn cassandra.cqlengine.query +============================ ========== + +UnicodeMixin Consolidation +-------------------------- +``class UnicodeMixin`` was defined in several cqlengine modules. This has been consolidated +to a single definition in the cqlengine package init file. This is not technically part of +the API, but noted here for completeness. + +API Deprecations +================ +This upgrade served as a good juncture to deprecate certain API features and invite users to upgrade +to new ones. The first released version does not change functionality -- only introduces deprecation +warnings. Future releases will remove these features in favor of the alternatives. + +Schema Management +----------------- +``cassandra.cqlengine.management.create_keyspace`` is deprecated. Instead, use the new replication-strategy-specific +functions that accept explicit options for known strategies: + +- :func:`~.create_keyspace_simple` +- :func:`~.create_keyspace_network_topology` + +``cassandra.cqlengine.management.delete_keyspace`` is deprecated in favor of a new function, :func:`~.drop_keyspace`. The +intent is simply to make the function match the CQL verb it invokes. + +Model Inheritance +----------------- +The names for class attributes controlling model inheritance are changing. 
Changes are as follows: + +- Replace 'polymorphic_key' in the base class Column definition with :attr:`~.discriminator_column` +- Replace the '__polymporphic_key__' class attribute the derived classes with :attr:`~.__discriminator_value__` + +The functionality is unchanged -- the intent here is to make the names and language around these attributes more precise. +For now, the old names are just deprecated, and the mapper will emit warnings if they are used. The old names +will be removed in a future version. + +The example below shows a simple translation: + +Before:: + + class Pet(Model): + __table_name__ = 'pet' + owner_id = UUID(primary_key=True) + pet_id = UUID(primary_key=True) + pet_type = Text(polymorphic_key=True) + name = Text() + + class Cat(Pet): + __polymorphic_key__ = 'cat' + + class Dog(Pet): + __polymorphic_key__ = 'dog' + +After:: + + class Pet(models.Model): + __table_name__ = 'pet' + owner_id = UUID(primary_key=True) + pet_id = UUID(primary_key=True) + pet_type = Text(discriminator_column=True) + name = Text() + + class Cat(Pet): + __discriminator_value__ = 'cat' + + class Dog(Pet): + __discriminator_value__ = 'dog' + + diff --git a/docs/object_mapper.rst b/docs/object_mapper.rst index 2e982a5012..26d78a0964 100644 --- a/docs/object_mapper.rst +++ b/docs/object_mapper.rst @@ -7,6 +7,9 @@ cqlengine is the Cassandra CQL 3 Object Mapper packaged with this driver Contents -------- +:doc:`cqlengine/upgrade_guide` + For migrating projects from legacy cqlengine, to the integrated product + :doc:`cqlengine/models` Examples defining models, and mapping them to tables @@ -27,6 +30,7 @@ Contents .. 
toctree:: :hidden: + cqlengine/upgrade_guide cqlengine/models cqlengine/queryset cqlengine/batches From 28b2fb9fa2dca01f68939f013d23cdc374cfbdef Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 18 Mar 2015 15:27:21 -0500 Subject: [PATCH 1279/3726] Update changelog --- CHANGELOG.rst | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 47b9d29cc1..10e45c3b1c 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,5 +1,25 @@ +2.5.0 +===== +March 30, 2015 + +Features +-------- +* Integrated cqlengine object mapping package +* Utility functions for converting timeuuids and datetime (PYTHON-99) +* Schema metadata fetch window randomized, config options added (PYTHON-202) +* Support for new Date and Time Cassandra types (PYTHON-190) + +Bug Fixes +--------- +* Fix index target for collection indexes (full(), keys()) (PYTHON-222) +* Thread exception during GIL cleanup (PYTHON-229) +* Workaround for rounding anomaly in datetime.utcfromtime (Python 3.4) (PYTHON-230) +* Normalize text serialization for lookup in OrderedMap (PYTHON-231) +* Support reading CompositeType data (PYTHON-234) + 2.1.4 ===== +January 26, 2015 Features -------- From ff17f4d146206cac5b299f58771c438f1f62c91f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 18 Mar 2015 16:30:12 -0500 Subject: [PATCH 1280/3726] Merge cqlengine travis config --- .travis.yml | 58 +++++++++++++++++++---------------------------------- 1 file changed, 21 insertions(+), 37 deletions(-) diff --git a/.travis.yml b/.travis.yml index 1f8462dc68..c42fafcd40 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,51 +1,35 @@ language: python python: 2.7 env: - - TOX_ENV=py26 - - TOX_ENV=py27 - - TOX_ENV=pypy - - TOX_ENV=py33 - - TOX_ENV=py34 + - TOX_ENV=py26 CASSANDRA_VERSION=21 + - TOX_ENV=py27 CASSANDRA_VERSION=12 + - TOX_ENV=py27 CASSANDRA_VERSION=20 + - TOX_ENV=py27 CASSANDRA_VERSION=21 + - TOX_ENV=pypy CASSANDRA_VERSION=21 + - TOX_ENV=py33 CASSANDRA_VERSION=21 + - 
TOX_ENV=py34 CASSANDRA_VERSION=21 before_install: - sudo apt-get update -y - sudo apt-get install -y build-essential python-dev - sudo apt-get install -y libev4 libev-dev + - sudo echo "deb http://www.apache.org/dist/cassandra/debian ${CASSANDRA_VERSION}x main" | sudo tee -a /etc/apt/sources.list + - sudo echo "deb-src http://www.apache.org/dist/cassandra/debian ${CASSANDRA_VERSION}x main" | sudo tee -a /etc/apt/sources.list + - sudo rm -rf ~/.gnupg + - sudo gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D + - sudo gpg --export --armor F758CE318D77295D | sudo apt-key add - + - sudo gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00 + - sudo gpg --export --armor 2B5C1B00 | sudo apt-key add - + - sudo gpg --keyserver pgp.mit.edu --recv-keys 0353B12C + - sudo gpg --export --armor 0353B12C | sudo apt-key add - + - sudo apt-get update + - sudo apt-get -o Dpkg::Options::="--force-confnew" install -y cassandra + - sudo rm -rf /var/lib/cassandra/* + - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /etc/cassandra/cassandra-env.sh" + - sudo service cassandra start install: - pip install tox script: - tox -e $TOX_ENV - -# TODO: merge cqlengine tests -#env: -# - CASSANDRA_VERSION=12 -# - CASSANDRA_VERSION=20 -# - CASSANDRA_VERSION=21 -# -#python: -# - "2.7" -# - "3.4" -# -#before_install: -# - sudo echo "deb http://www.apache.org/dist/cassandra/debian ${CASSANDRA_VERSION}x main" | sudo tee -a /etc/apt/sources.list -# - sudo echo "deb-src http://www.apache.org/dist/cassandra/debian ${CASSANDRA_VERSION}x main" | sudo tee -a /etc/apt/sources.list -# - sudo rm -rf ~/.gnupg -# - sudo gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D -# - sudo gpg --export --armor F758CE318D77295D | sudo apt-key add - -# - sudo gpg --keyserver pgp.mit.edu --recv-keys 2B5C1B00 -# - sudo gpg --export --armor 2B5C1B00 | sudo apt-key add - -# - sudo gpg --keyserver pgp.mit.edu --recv-keys 0353B12C -# - sudo gpg --export --armor 0353B12C | sudo apt-key add - 
-# - sudo apt-get update -# - sudo apt-get -o Dpkg::Options::="--force-confnew" install -y cassandra -# - sudo rm -rf /var/lib/cassandra/* -# - sudo sh -c "echo 'JVM_OPTS=\"\${JVM_OPTS} -Djava.net.preferIPv4Stack=false\"' >> /etc/cassandra/cassandra-env.sh" -# - sudo service cassandra start -# -#install: -# - "pip install -r requirements.txt --use-mirrors" -# -#script: -# - "nosetests cqlengine/tests --no-skip" From 30495d3c783af464dc37b231ececada29eeb7406 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 19 Mar 2015 10:57:52 -0500 Subject: [PATCH 1281/3726] Change C* version in travis matrix to avoid integration test conflict --- .travis.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.travis.yml b/.travis.yml index c42fafcd40..83ffff8fe1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,20 +1,20 @@ language: python python: 2.7 env: - - TOX_ENV=py26 CASSANDRA_VERSION=21 - - TOX_ENV=py27 CASSANDRA_VERSION=12 - - TOX_ENV=py27 CASSANDRA_VERSION=20 - - TOX_ENV=py27 CASSANDRA_VERSION=21 - - TOX_ENV=pypy CASSANDRA_VERSION=21 - - TOX_ENV=py33 CASSANDRA_VERSION=21 - - TOX_ENV=py34 CASSANDRA_VERSION=21 + - TOX_ENV=py26 CASS_VER=21 + - TOX_ENV=py27 CASS_VER=12 + - TOX_ENV=py27 CASS_VER=20 + - TOX_ENV=py27 CASS_VER=21 + - TOX_ENV=pypy CASS_VER=21 + - TOX_ENV=py33 CASS_VER=21 + - TOX_ENV=py34 CASS_VER=21 before_install: - sudo apt-get update -y - sudo apt-get install -y build-essential python-dev - sudo apt-get install -y libev4 libev-dev - - sudo echo "deb http://www.apache.org/dist/cassandra/debian ${CASSANDRA_VERSION}x main" | sudo tee -a /etc/apt/sources.list - - sudo echo "deb-src http://www.apache.org/dist/cassandra/debian ${CASSANDRA_VERSION}x main" | sudo tee -a /etc/apt/sources.list + - sudo echo "deb http://www.apache.org/dist/cassandra/debian ${CASS_VER}x main" | sudo tee -a /etc/apt/sources.list + - sudo echo "deb-src http://www.apache.org/dist/cassandra/debian ${CASS_VER}x main" | sudo tee -a /etc/apt/sources.list - 
sudo rm -rf ~/.gnupg - sudo gpg --keyserver pgp.mit.edu --recv-keys F758CE318D77295D - sudo gpg --export --armor F758CE318D77295D | sudo apt-key add - From 08adcc76f3ad5fb250820db580a7e42acf217975 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 19 Mar 2015 11:01:35 -0500 Subject: [PATCH 1282/3726] protocol v1 for test get_server_ver because we don't know yet --- tests/integration/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 5d976ac5ff..ebc038a362 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -61,7 +61,7 @@ def get_server_versions(): if cass_version is not None: return (cass_version, cql_version) - c = Cluster() + c = Cluster(protocol_version=1) s = c.connect() row = s.execute('SELECT cql_version, release_version FROM system.local')[0] From fa2e4c77e9d5719e801730a854264d914a7a71d5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 19 Mar 2015 11:01:39 -0500 Subject: [PATCH 1283/3726] Don't try to sync table with static if test will be skipped --- tests/integration/cqlengine/columns/test_static_column.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_static_column.py b/tests/integration/cqlengine/columns/test_static_column.py index 0fa0af4b70..7037e92a50 100644 --- a/tests/integration/cqlengine/columns/test_static_column.py +++ b/tests/integration/cqlengine/columns/test_static_column.py @@ -22,6 +22,9 @@ from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration import PROTOCOL_VERSION +# TODO: is this really a protocol limitation, or is it just C* version? 
+# good enough proxy for now +STATIC_SUPPORTED = PROTOCOL_VERSION >= 2 class TestStaticModel(Model): partition = columns.UUID(primary_key=True, default=uuid4) @@ -30,14 +33,15 @@ class TestStaticModel(Model): text = columns.Text() -@skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") +@skipUnless(STATIC_SUPPORTED, "only runs against the cql3 protocol v2.0") class TestStaticColumn(BaseCassEngTestCase): @classmethod def setUpClass(cls): super(TestStaticColumn, cls).setUpClass() drop_table(TestStaticModel) - sync_table(TestStaticModel) + if STATIC_SUPPORTED: # setup and teardown run regardless of skip + sync_table(TestStaticModel) @classmethod def tearDownClass(cls): From 82df9cf07522c699bd79b2b4119c8a2abf8f49f6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 19 Mar 2015 11:56:27 -0500 Subject: [PATCH 1284/3726] Conditional test around C* list prepend bug, which is not in 1.2.x --- tests/integration/cqlengine/__init__.py | 12 +++++++++--- .../cqlengine/columns/test_container_columns.py | 15 +++++++++++---- tests/integration/cqlengine/query/test_updates.py | 13 ++++++++----- 3 files changed, 28 insertions(+), 12 deletions(-) diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index 1d0c5234e0..f0322c6bc8 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -18,7 +18,7 @@ from cassandra.cqlengine import connection from cassandra.cqlengine.management import create_keyspace_simple, CQLENG_ALLOW_SCHEMA_MANAGEMENT -from tests.integration import use_single_node, PROTOCOL_VERSION +from tests.integration import get_server_versions, use_single_node, PROTOCOL_VERSION def setup_package(): @@ -29,7 +29,13 @@ def setup_package(): keyspace = 'cqlengine_test' connection.setup(['localhost'], - protocol_version=PROTOCOL_VERSION, - default_keyspace=keyspace) + protocol_version=PROTOCOL_VERSION, + default_keyspace=keyspace) create_keyspace_simple(keyspace, 1) + + 
+def is_prepend_reversed(): + # do we have https://issues.apache.org/jira/browse/CASSANDRA-8733 ? + ver, _ = get_server_versions() + return not (ver >= (2, 0, 13) or ver >= (2, 1, 3)) diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index 5859f93629..c49b73db72 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -20,6 +20,7 @@ from cassandra.cqlengine.models import Model, ValidationError import cassandra.cqlengine.columns as columns from cassandra.cqlengine.management import sync_table, drop_table +from tests.integration.cqlengine import is_prepend_reversed from tests.integration.cqlengine.base import BaseCassEngTestCase @@ -240,15 +241,21 @@ def test_element_count_validation(self): def test_partial_updates(self): """ Tests that partial udpates work as expected """ - final = list(range(10)) - initial = final[3:7] + full = list(range(10)) + initial = full[3:7] + m1 = TestListModel.create(int_list=initial) - m1.int_list = final + m1.int_list = full m1.save() + if is_prepend_reversed(): + expected = full[2::-1] + full[3:] + else: + expected = full + m2 = TestListModel.get(partition=m1.partition) - assert list(m2.int_list) == final + self.assertEqual(list(m2.int_list), expected) def test_instantiation_with_column_class(self): """ diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index 4efb79950a..49c0dc12d6 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -14,12 +14,12 @@ from uuid import uuid4 from cassandra.cqlengine import ValidationError -from cassandra.cqlengine.query import QueryException -from tests.integration.cqlengine.base import BaseCassEngTestCase from cassandra.cqlengine.models import Model from cassandra.cqlengine.management import 
sync_table, drop_table from cassandra.cqlengine import columns +from tests.integration.cqlengine import is_prepend_reversed +from tests.integration.cqlengine.base import BaseCassEngTestCase class TestQueryUpdateModel(Model): @@ -192,13 +192,16 @@ def test_list_prepend_updates(self): """ Prepend two things since order is reversed by default by CQL """ partition = uuid4() cluster = 1 + original = ["foo"] TestQueryUpdateModel.objects.create( - partition=partition, cluster=cluster, text_list=["foo"]) + partition=partition, cluster=cluster, text_list=original) + prepended = ['bar', 'baz'] TestQueryUpdateModel.objects( partition=partition, cluster=cluster).update( - text_list__prepend=['bar', 'baz']) + text_list__prepend=prepended) obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) - self.assertEqual(obj.text_list, ["bar", "baz", "foo"]) + expected = (prepended[::-1] if is_prepend_reversed() else prepended) + original + self.assertEqual(obj.text_list, expected) def test_map_update_updates(self): """ Merge a dictionary into existing value """ From aa74a4a81c039c02fad59a52a1219996238ddad7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 19 Mar 2015 15:02:58 -0500 Subject: [PATCH 1285/3726] add str function to UserType --- cassandra/cqlengine/usertype.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index fa94293d5c..33bfec7627 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -56,6 +56,9 @@ def __eq__(self, other): def __ne__(self, other): return not self.__eq__(other) + def __str__(self): + return "{{{}}}".format(', '.join("'{}': {}".format(k, getattr(self, k)) for k, v in six.iteritems(self._values))) + @classmethod def register_for_keyspace(cls, keyspace): connection.register_udt(keyspace, cls.type_name(), cls) @@ -102,11 +105,6 @@ def __new__(cls, name, bases, attrs): field_defs = [(k, v) for k, v in 
attrs.items() if isinstance(v, columns.Column)] field_defs = sorted(field_defs, key=lambda x: x[1].position) - # TODO: this plus more validation - #counter_columns = [c for c in defined_columns.values() if isinstance(c, columns.Counter)] - #if counter_columns and data_columns: - # raise ModelDefinitionException('counter models may not have data columns') - def _transform_column(field_name, field_obj): field_dict[field_name] = field_obj field_obj.set_column_name(field_name) From dcb94500584a318f046cee71d7ab9fc8439c749e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 19 Mar 2015 15:03:30 -0500 Subject: [PATCH 1286/3726] explicit type refresh on sync for proto v2 --- cassandra/cqlengine/management.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 5b6767ad64..7ea642af8d 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -303,6 +303,7 @@ def _sync_type(ks_name, type_model, omit_subtypes=None): log.debug("sync_type creating new type %s", type_name_qualified) cql = get_create_type(type_model, ks_name) execute(cql) + cluster.refresh_schema(keyspace=ks_name, usertype=type_name) type_model.register_for_keyspace(ks_name) else: defined_fields = defined_types[type_name].field_names From e49c57857cf9b43b6a7fc63becadd131c222e6ee Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 19 Mar 2015 15:04:15 -0500 Subject: [PATCH 1287/3726] don't blow up if we are connected but type is not sync'd --- cassandra/cqlengine/connection.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index 7b1ffcf9b5..ead241c6fd 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -200,7 +200,10 @@ def register_udt(keyspace, type_name, klass): global cluster if cluster: - cluster.register_user_type(keyspace, type_name, klass) + try: + 
cluster.register_user_type(keyspace, type_name, klass) + except UserTypeDoesNotExist: + pass # new types are covered in management sync functions def _register_known_types(cluster): From 4cd60dcadb54fb5e8896234cca40f6e9953da82e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 19 Mar 2015 16:51:06 -0500 Subject: [PATCH 1288/3726] cqle: add documentation for User Defined Type models --- cassandra/cqlengine/columns.py | 6 +++- cassandra/cqlengine/usertype.py | 20 ++++++++++++ docs/api/cassandra/cqlengine/columns.rst | 2 ++ docs/cqlengine/models.rst | 40 ++++++++++++++++++++++++ 4 files changed, 67 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 1491b777db..49d84ff002 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -864,11 +864,15 @@ class UserDefinedType(Column): User Defined Type column http://www.datastax.com/documentation/cql/3.1/cql/cql_using/cqlUseUDT.html + + These columns are represented by a specialization of :class:`cassandra.cqlengine.usertype.UserType`. + + Please see :ref:`user_types` for examples and discussion. """ def __init__(self, user_type, **kwargs): """ - :param type user_type: specifies a :class:`~.Type` model for the column + :param type user_type: specifies the :class:`~.UserType` model of the column """ self.user_type = user_type self.db_type = "frozen<%s>" % user_type.type_name() diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index 33bfec7627..849eac093f 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -132,6 +132,26 @@ def _transform_column(field_name, field_obj): @six.add_metaclass(UserTypeMetaClass) class UserType(BaseUserType): + """ + This class is used to model User Defined Types. To define a type, declare a class inheriting from this, + and assign field types as class attributes: + + .. code-block:: python + + # connect with default keyspace ... 
+ + from cassandra.cqlengine.columns import Text, Integer + from cassandra.cqlengine.usertype import UserType + + class address(UserType): + street = Text() + zipcode = Integer() + + from cassandra.cqlengine import management + management.sync_type(address) + + Please see :ref:`user_types` for a complete example and discussion. + """ __type_name__ = None """ diff --git a/docs/api/cassandra/cqlengine/columns.rst b/docs/api/cassandra/cqlengine/columns.rst index 8dcf690cbd..425e01e03a 100644 --- a/docs/api/cassandra/cqlengine/columns.rst +++ b/docs/api/cassandra/cqlengine/columns.rst @@ -72,6 +72,8 @@ Columns of all types are initialized by passing :class:`.Column` attributes to t .. autoclass:: TimeUUID(**kwargs) +.. autoclass:: UserDefinedType + .. autoclass:: UUID(**kwargs) .. autoclass:: VarInt(**kwargs) diff --git a/docs/cqlengine/models.rst b/docs/cqlengine/models.rst index aef77f504d..002ce3de65 100644 --- a/docs/cqlengine/models.rst +++ b/docs/cqlengine/models.rst @@ -178,3 +178,43 @@ automatically saved into the discriminator column. The discriminator column may Additionally, if you set ``index=True`` on your discriminator column, you can execute queries against specialized subclasses, and a ``WHERE`` clause will be automatically added to your query, returning only rows of that type. Note that you must define a unique ``__discriminator_value__`` to each subclass, and that you can only assign a single discriminator column per model. + +.. _user_types: + +User Defined Types +================== +cqlengine models User Defined Types (UDTs) much like tables, with fields defined by column type attributes. However, UDT instances +are only created, presisted, and queried via table Models. 
A short example to introduce the pattern:: + + from cassandra.cqlengine.columns import * + from cassandra.cqlengine.models import Model + from cassandra.cqlengine.usertype import UserType + + class address(UserType): + street = Text() + zipcode = Integer() + + class users(Model): + __keyspace__ = 'account' + name = Text(primary_key=True) + addr = UserDefinedType(address) + + sync_table(users) + + users.create(name="Joe", addr=address(street="Easy St.", zip=99999)) + user = users.objects(name="Joe")[0] + print user.name, user.addr + # Joe {'street': Easy St., 'zipcode': None} + +UDTs are modeled by inheriting :class:`~.usertype.UserType`, and setting column type attributes. Types are then used in defining +models by declaring a column of type :class:`~.columns.UserDefinedType`, with the ``UserType`` class as a parameter. + +``sync_table`` will implicitly +synchronize any types contained in the table. Alternatively :func:`~.management.sync_type` can be used to create/alter types +explicitly. + +Upon declaration, types are automatically registered with the driver, so query results return instances of your ``UserType`` +class*. + +***Note**: UDTs were not added to the native protocol until v3. When setting up the cqlengine connection, be sure to specify +``protocol_version=3``. If using an earlier version, UDT queries will still work, but the returned type will be a namedtuple. From dafddfb4cc13d6fa049b3f6c84f7ec420fba5e3e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 20 Mar 2015 09:30:02 -0500 Subject: [PATCH 1289/3726] cqle: add UDTValueManager to detect sub-field updatees UDTs are still updated en masse when changed. This aligns with concept of 'frozen' types. 
--- cassandra/cqlengine/columns.py | 12 ++++++++++++ cassandra/cqlengine/usertype.py | 7 +++++++ 2 files changed, 19 insertions(+) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 49d84ff002..ec6a468f60 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -859,6 +859,16 @@ def sub_columns(self): return [self.key_col, self.value_col] +class UDTValueManager(BaseValueManager): + @property + def changed(self): + return self.value != self.previous_value or self.value.has_changed_fields() + + def reset_previous_value(self): + self.value.reset_changed_fields() + self.previous_value = copy(self.value) + + class UserDefinedType(Column): """ User Defined Type column @@ -870,6 +880,8 @@ class UserDefinedType(Column): Please see :ref:`user_types` for examples and discussion. """ + value_manager = UDTValueManager + def __init__(self, user_type, **kwargs): """ :param type user_type: specifies the :class:`~.UserType` model of the column diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index 849eac093f..bcff9c97fe 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -59,6 +59,13 @@ def __ne__(self, other): def __str__(self): return "{{{}}}".format(', '.join("'{}': {}".format(k, getattr(self, k)) for k, v in six.iteritems(self._values))) + def has_changed_fields(self): + return any(v.changed for v in self._values.values()) + + def reset_changed_fields(self): + for v in self._values.values(): + v.reset_previous_value() + @classmethod def register_for_keyspace(cls, keyspace): connection.register_udt(keyspace, cls.type_name(), cls) From 9cb1d7efc2e3d4031c86811246bcf4d4a01b2208 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 20 Mar 2015 11:37:12 -0500 Subject: [PATCH 1290/3726] Don't require ccm for USE_CASS_EXTERNAL --- tests/integration/__init__.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git 
a/tests/integration/__init__.py b/tests/integration/__init__.py index ebc038a362..518c22364a 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -85,11 +85,12 @@ def _tuple_version(version_string): default_cassandra_version = '2.1.2' if USE_CASS_EXTERNAL and CCMClusterFactory: - # see if the external instance is running in ccm - path = common.get_default_path() - name = common.current_cluster_name(path) - CCM_CLUSTER = CCMClusterFactory.load(common.get_default_path(), name) - CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True) + if CCMClusterFactory: + # see if the external instance is running in ccm + path = common.get_default_path() + name = common.current_cluster_name(path) + CCM_CLUSTER = CCMClusterFactory.load(common.get_default_path(), name) + CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True) cass_ver, _ = get_server_versions() default_cassandra_version = '.'.join('%d' % i for i in cass_ver) From 8b7beb3966156857eb94b0dd0f65e731300311e0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 20 Mar 2015 12:11:23 -0500 Subject: [PATCH 1291/3726] Don't require ccm for USE_CASS_EXTERNAL, for real --- tests/integration/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 518c22364a..ab720779ea 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -84,7 +84,7 @@ def _tuple_version(version_string): default_cassandra_version = '2.1.2' -if USE_CASS_EXTERNAL and CCMClusterFactory: +if USE_CASS_EXTERNAL: if CCMClusterFactory: # see if the external instance is running in ccm path = common.get_default_path() From 8c98eb22d022f6385281edfd8d4836ea0ee7e59a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 20 Mar 2015 14:55:52 -0500 Subject: [PATCH 1292/3726] cqle: repr for Model prints all cols. 
str prints keys --- cassandra/cqlengine/models.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index e56220babb..35d10bb15d 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -375,12 +375,17 @@ def __init__(self, **values): self._timeout = connection.NOT_SET def __repr__(self): + return '{}({})'.format(self.__class__.__name__, + ', '.join('{}={!r}'.format(k, getattr(self, k)) + for k in self._defined_columns.keys() + if k != self._discriminator_column_name)) + + def __str__(self): """ Pretty printing of models by their primary key """ return '{} <{}>'.format(self.__class__.__name__, - ', '.join(('{}={}'.format(k, getattr(self, k)) for k, v in six.iteritems(self._primary_keys))) - ) + ', '.join('{}={}'.format(k, getattr(self, k)) for k in self._primary_keys.keys())) @classmethod def _discover_polymorphic_submodels(cls): From 79bd4fac173297c970d921e619569c96004c84b5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 20 Mar 2015 15:33:30 -0500 Subject: [PATCH 1293/3726] cqle: add dict access sugar to UserType --- cassandra/cqlengine/usertype.py | 38 ++++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index bcff9c97fe..1e30fc829c 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -1,7 +1,6 @@ import re import six -from cassandra.cluster import UserTypeDoesNotExist from cassandra.util import OrderedDict from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine import columns @@ -66,6 +65,43 @@ def reset_changed_fields(self): for v in self._values.values(): v.reset_previous_value() + def __iter__(self): + for field in self._fields.keys(): + yield field + + def __getitem__(self, key): + if not isinstance(key, six.string_types): + raise TypeError + if key not in self._fields.keys(): + 
raise KeyError + return getattr(self, key) + + def __setitem__(self, key, val): + if not isinstance(key, six.string_types): + raise TypeError + if key not in self._fields.keys(): + raise KeyError + return setattr(self, key, val) + + def __len__(self): + try: + return self._len + except: + self._len = len(self._columns.keys()) + return self._len + + def keys(self): + """ Returns a list of column IDs. """ + return [k for k in self] + + def values(self): + """ Returns list of column values. """ + return [self[k] for k in self] + + def items(self): + """ Returns a list of column ID/value tuples. """ + return [(k, self[k]) for k in self] + @classmethod def register_for_keyspace(cls, keyspace): connection.register_udt(keyspace, cls.type_name(), cls) From 4f7db5cd55cf4e3174628a2846bbaf3ecf2acf60 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Fri, 20 Mar 2015 14:32:01 -0700 Subject: [PATCH 1294/3726] UDT cqlengine integration tests --- .../integration/cqlengine/model/test_udts.py | 248 ++++++++++++++++++ 1 file changed, 248 insertions(+) create mode 100644 tests/integration/cqlengine/model/test_udts.py diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py new file mode 100644 index 0000000000..9514da3b5e --- /dev/null +++ b/tests/integration/cqlengine/model/test_udts.py @@ -0,0 +1,248 @@ +# Copyright 2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from datetime import date, datetime +from uuid import UUID +from decimal import Decimal +import pytz + +from cassandra.cqlengine.models import Model +from cassandra.cqlengine.usertype import UserType +from cassandra.cqlengine import columns +from cassandra.cqlengine.management import sync_table, sync_type, create_keyspace_simple, drop_keyspace + +from tests.integration.cqlengine.base import BaseCassEngTestCase + +class UserDefinedTypeTests(BaseCassEngTestCase): + + def test_can_create_udts(self): + class User(UserType): + age = columns.Integer() + name = columns.Text() + + sync_type("cqlengine_test", User) + user = User(age=42, name="John") + self.assertEqual(42, user.age) + self.assertEqual("John", user.name) + + # Add a field + class User(UserType): + age = columns.Integer() + name = columns.Text() + gender = columns.Text() + + sync_type("cqlengine_test", User) + user = User(age=42, name="John", gender="male") + self.assertEqual(42, user.age) + self.assertEqual("John", user.name) + self.assertEqual("male", user.gender) + + # Remove a field + class User(UserType): + age = columns.Integer() + name = columns.Text() + + sync_type("cqlengine_test", User) + user = User(age=42, name="John", gender="male") + with self.assertRaises(AttributeError): + user.gender + + def test_can_insert_udts(self): + class User(UserType): + age = columns.Integer() + name = columns.Text() + + class UserModel(Model): + id = columns.Integer(primary_key=True) + info = columns.UserDefinedType(User) + + sync_table(UserModel) + + user = User(age=42, name="John") + UserModel.create(id=0, info=user) + + self.assertEqual(1, UserModel.objects.count()) + + john = UserModel.objects().first() + self.assertEqual(0, john.id) + self.assertTrue(type(john.info) is User) + self.assertEqual(42, john.info.age) + self.assertEqual("John", john.info.name) + + def test_can_update_udts(self): + class User(UserType): + age = columns.Integer() + name = columns.Text() + + class UserModel(Model): + id = 
columns.Integer(primary_key=True) + info = columns.UserDefinedType(User) + + sync_table(UserModel) + + user = User(age=42, name="John") + created_user = UserModel.create(id=0, info=user) + + john_info = UserModel.objects().first().info + self.assertEqual(42, john_info.age) + self.assertEqual("John", john_info.name) + + created_user.info = User(age=22, name="Mary") + created_user.save() + + mary_info = UserModel.objects().first().info + self.assertEqual(22, mary_info.age) + self.assertEqual("Mary", mary_info.name) + + def test_can_create_same_udt_different_keyspaces(self): + class User(UserType): + age = columns.Integer() + name = columns.Text() + + sync_type("cqlengine_test", User) + + create_keyspace_simple("simplex", 1) + sync_type("simplex", User) + drop_keyspace("simplex") + + def test_can_insert_partial_udts(self): + class User(UserType): + age = columns.Integer() + name = columns.Text() + gender = columns.Text() + + class UserModel(Model): + id = columns.Integer(primary_key=True) + info = columns.UserDefinedType(User) + + sync_table(UserModel) + + user = User(age=42, name="John") + UserModel.create(id=0, info=user) + + john_info = UserModel.objects().first().info + self.assertEqual(42, john_info.age) + self.assertEqual("John", john_info.name) + self.assertIsNone(john_info.gender) + + user = User(age=42) + UserModel.create(id=0, info=user) + + john_info = UserModel.objects().first().info + self.assertEqual(42, john_info.age) + self.assertIsNone(john_info.name) + self.assertIsNone(john_info.gender) + + def test_can_insert_nested_udts(self): + class Depth_0(UserType): + age = columns.Integer() + name = columns.Text() + + class Depth_1(UserType): + value = columns.UserDefinedType(Depth_0) + + class Depth_2(UserType): + value = columns.UserDefinedType(Depth_1) + + class Depth_3(UserType): + value = columns.UserDefinedType(Depth_2) + + class DepthModel(Model): + id = columns.Integer(primary_key=True) + v_0 = columns.UserDefinedType(Depth_0) + v_1 = 
columns.UserDefinedType(Depth_1) + v_2 = columns.UserDefinedType(Depth_2) + v_3 = columns.UserDefinedType(Depth_3) + + sync_table(DepthModel) + + udts = [Depth_0(age=42, name="John")] + udts.append(Depth_1(value=udts[0])) + udts.append(Depth_2(value=udts[1])) + udts.append(Depth_3(value=udts[2])) + + DepthModel.create(id=0, v_0=udts[0], v_1=udts[1], v_2=udts[2], v_3=udts[3]) + output = DepthModel.objects().first() + + self.assertEqual(udts[0], output.v_0) + self.assertEqual(udts[1], output.v_1) + self.assertEqual(udts[2], output.v_2) + self.assertEqual(udts[3], output.v_3) + + def test_can_insert_udts_with_nulls(self): + class AllDatatypes(UserType): + a = columns.Ascii() + b = columns.BigInt() + c = columns.Blob() + d = columns.Boolean() + e = columns.Date() + f = columns.DateTime() + g = columns.Decimal() + h = columns.Float() + i = columns.Inet() + j = columns.Integer() + k = columns.Text() + l = columns.TimeUUID() + m = columns.UUID() + n = columns.VarInt() + + class AllDatatypesModel(Model): + id = columns.Integer(primary_key=True) + data = columns.UserDefinedType(AllDatatypes) + + sync_table(AllDatatypesModel) + + input = AllDatatypes(a=None, b=None, c=None, d=None, e=None, f=None, g=None, h=None, i=None, j=None, k=None, l=None, m=None, n=None) + AllDatatypesModel.create(id=0, data=input) + + self.assertEqual(1, AllDatatypesModel.objects.count()) + + output = AllDatatypesModel.objects().first().data + self.assertEqual(input, output) + + def test_can_insert_udts_with_all_datatypes(self): + class AllDatatypes(UserType): + a = columns.Ascii() + b = columns.BigInt() + c = columns.Blob() + d = columns.Boolean() + e = columns.Date() + f = columns.DateTime() + g = columns.Decimal() + h = columns.Float(double_precision=True) + i = columns.Inet() + j = columns.Integer() + k = columns.Text() + l = columns.TimeUUID() + m = columns.UUID() + n = columns.VarInt() + + class AllDatatypesModel(Model): + id = columns.Integer(primary_key=True) + data = 
columns.UserDefinedType(AllDatatypes) + + sync_table(AllDatatypesModel) + + input = AllDatatypes(a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True, e=date(1970, 1, 1), + f=datetime.fromtimestamp(872835240, tz=pytz.timezone('America/New_York')).astimezone(pytz.UTC).replace(tzinfo=None), + g=Decimal('12.3E+7'), h=3.4028234663852886e+38, i='123.123.123.123', j=2147483647, + k='text', l= UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), + m=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), n=int(str(2147483647) + '000')) + alldata = AllDatatypesModel.create(id=0, data=input) + + self.assertEqual(1, AllDatatypesModel.objects.count()) + output = AllDatatypesModel.objects().first().data + + for i in range(ord('a'), ord('a') + 14): + self.assertEqual(input[chr(i)], output[chr(i)]) From 7542c896021e933da41b1ba2b7bc74b11108196e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 20 Mar 2015 16:52:17 -0500 Subject: [PATCH 1295/3726] Avoid version query: Hack to make tests.integration load in python3 --- tests/integration/__init__.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index ab720779ea..ce3f042bf6 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -25,6 +25,7 @@ import os from threading import Event +import six from itertools import groupby @@ -82,7 +83,7 @@ def _tuple_version(version_string): USE_CASS_EXTERNAL = bool(os.getenv('USE_CASS_EXTERNAL', False)) -default_cassandra_version = '2.1.2' +default_cassandra_version = '2.1.3' if USE_CASS_EXTERNAL: if CCMClusterFactory: @@ -92,8 +93,19 @@ def _tuple_version(version_string): CCM_CLUSTER = CCMClusterFactory.load(common.get_default_path(), name) CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True) - cass_ver, _ = get_server_versions() - default_cassandra_version = '.'.join('%d' % i for i in cass_ver) + # Not sure what's going on, but the server version query + # 
hangs in python3. This appears to be related to running inside of + # nosetests, and only for this query that would run while loading the + # module. + # This is a hack to make it run with default cassandra version for PY3. + # Not happy with it, but need to move on for now. + if not six.PY3: + cass_ver, _ = get_server_versions() + default_cassandra_version = '.'.join('%d' % i for i in cass_ver) + else: + if not os.getenv('CASSANDRA_VERSION'): + log.warning("Using default C* version %s because external server cannot be queried" % default_cassandra_version) + CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None) CASSANDRA_VERSION = os.getenv('CASSANDRA_VERSION', default_cassandra_version) From 95b3a8ad3d8e476293de4a33b4725da9b6f1fc62 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 20 Mar 2015 17:50:10 -0500 Subject: [PATCH 1296/3726] don't require UDT fields when CQL encoding registered type --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 35bb072984..34ba3a7acb 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1737,7 +1737,7 @@ def user_type_registered(self, keyspace, user_type, klass): def encode(val): return '{ %s }' % ' , '.join('%s : %s' % ( field_name, - self.encoder.cql_encode_all_types(getattr(val, field_name)) + self.encoder.cql_encode_all_types(getattr(val, field_name, None)) ) for field_name in type_meta.field_names) self.encoder.mapping[klass] = encode From 067b2c42411a63ec63dd226cee9c1b7ad9ea859d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 23 Mar 2015 09:22:16 -0500 Subject: [PATCH 1297/3726] cqle: remove pytz as dependency of test_udts --- tests/integration/cqlengine/model/test_udts.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 9514da3b5e..d430704a17 100644 --- 
a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -13,9 +13,8 @@ # limitations under the License. from datetime import date, datetime -from uuid import UUID from decimal import Decimal -import pytz +from uuid import UUID from cassandra.cqlengine.models import Model from cassandra.cqlengine.usertype import UserType @@ -235,7 +234,7 @@ class AllDatatypesModel(Model): sync_table(AllDatatypesModel) input = AllDatatypes(a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True, e=date(1970, 1, 1), - f=datetime.fromtimestamp(872835240, tz=pytz.timezone('America/New_York')).astimezone(pytz.UTC).replace(tzinfo=None), + f=datetime.utcfromtimestamp(872835240), g=Decimal('12.3E+7'), h=3.4028234663852886e+38, i='123.123.123.123', j=2147483647, k='text', l= UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), m=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), n=int(str(2147483647) + '000')) From e4c4c675f6b16b7007586aeb716e2e69148b42e3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 23 Mar 2015 10:52:40 -0500 Subject: [PATCH 1298/3726] Evict cached UDT class when new type is registered. 
--- cassandra/cluster.py | 2 ++ cassandra/cqltypes.py | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 34ba3a7acb..e523e38162 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -47,6 +47,7 @@ UnsupportedOperation, Unauthorized) from cassandra.connection import (ConnectionException, ConnectionShutdown, ConnectionHeartbeat) +from cassandra.cqltypes import UserType from cassandra.encoder import Encoder from cassandra.protocol import (QueryMessage, ResultMessage, ErrorMessage, ReadTimeoutErrorMessage, @@ -615,6 +616,7 @@ def __init__(self, street, zipcode): self._user_types[keyspace][user_type] = klass for session in self.sessions: session.user_type_registered(keyspace, user_type, klass) + UserType.evict_udt_class(keyspace, user_type) def get_min_requests_per_connection(self, host_distance): return self._min_requests_per_connection[host_distance] diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index e51e774ece..5b95d4c04b 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -899,6 +899,15 @@ def make_udt_class(cls, keyspace, udt_name, names_and_types, mapped_class): cls._cache[(keyspace, udt_name)] = instance return instance + @classmethod + def evict_udt_class(cls, keyspace, udt_name): + if six.PY2 and isinstance(udt_name, unicode): + udt_name = udt_name.encode('utf-8') + try: + del cls._cache[(keyspace, udt_name)] + except KeyError: + pass + @classmethod def apply_parameters(cls, subtypes, names): keyspace = subtypes[0] From 6a49d9dcd03f5378ba09819389a87bef067b1142 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 23 Mar 2015 10:53:14 -0500 Subject: [PATCH 1299/3726] CQL encode floats with repr to preserve precision. 
--- cassandra/encoder.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 5c5242b9ed..02eed2aa22 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -67,7 +67,7 @@ class Encoder(object): def __init__(self): self.mapping = { - float: self.cql_encode_object, + float: self.cql_encode_float, bytearray: self.cql_encode_bytes, str: self.cql_encode_str, int: self.cql_encode_object, @@ -138,6 +138,12 @@ def cql_encode_object(self, val): """ return str(val) + def cql_encode_float(self, val): + """ + Encode floats using repr to preserve precision + """ + return repr(val) + def cql_encode_datetime(self, val): """ Converts a :class:`datetime.datetime` object to a (string) integer timestamp From bb4f050c46b694a212531c80d2ad5395765b3e41 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 23 Mar 2015 11:07:36 -0500 Subject: [PATCH 1300/3726] Test float precision in CQL encoding --- tests/unit/test_parameter_binding.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index 596f291e3a..f649c5207c 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -68,6 +68,10 @@ def test_quote_escaping(self): result = bind_params("%s", ("""'ef''ef"ef""ef'""",), Encoder()) self.assertEqual(result, """'''ef''''ef"ef""ef'''""") + def test_float_precision(self): + f = 3.4028234663852886e+38 + self.assertEqual(float(bind_params("%s", (f,), Encoder())), f) + class BoundStatementTestCase(unittest.TestCase): From becb61b380042d48d70ed31506266952d78f4c27 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 23 Mar 2015 11:08:59 -0500 Subject: [PATCH 1301/3726] changelog update --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 10e45c3b1c..af2d611309 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -16,6 +16,7 @@ Bug Fixes * Workaround for 
rounding anomaly in datetime.utcfromtime (Python 3.4) (PYTHON-230) * Normalize text serialization for lookup in OrderedMap (PYTHON-231) * Support reading CompositeType data (PYTHON-234) +* Preserve float precision in CQL encoding (PYTHON-243) 2.1.4 ===== From e708babb48fbad7d564289fe559cb632fab6034e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 23 Mar 2015 12:05:32 -0500 Subject: [PATCH 1302/3726] cqle: skip UDT tests for earlier versions of C* --- tests/integration/cqlengine/model/test_udts.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index d430704a17..e2a6c883fc 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -14,6 +14,7 @@ from datetime import date, datetime from decimal import Decimal +from unittest import skipUnless from uuid import UUID from cassandra.cqlengine.models import Model @@ -21,8 +22,11 @@ from cassandra.cqlengine import columns from cassandra.cqlengine.management import sync_table, sync_type, create_keyspace_simple, drop_keyspace +from tests.integration import get_server_versions from tests.integration.cqlengine.base import BaseCassEngTestCase + +@skipUnless(get_server_versions()[0] > (2, 1, 0), "UDTs require Cassandra 2.1 or greater") class UserDefinedTypeTests(BaseCassEngTestCase): def test_can_create_udts(self): From 0e55a218eb5ba8ee79486561f8b8eded138d08d0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 23 Mar 2015 14:30:26 -0500 Subject: [PATCH 1303/3726] cqle: defer get_server_version until UserDefinedTypeTests class setup I don't know why, but the select query hangs when run inside of nosetests for python 3. 
--- tests/integration/cqlengine/model/test_udts.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index e2a6c883fc..ab9386b8b4 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -14,7 +14,7 @@ from datetime import date, datetime from decimal import Decimal -from unittest import skipUnless +import unittest from uuid import UUID from cassandra.cqlengine.models import Model @@ -26,9 +26,13 @@ from tests.integration.cqlengine.base import BaseCassEngTestCase -@skipUnless(get_server_versions()[0] > (2, 1, 0), "UDTs require Cassandra 2.1 or greater") class UserDefinedTypeTests(BaseCassEngTestCase): + @classmethod + def setUpClass(self): + if get_server_versions()[0] < (2, 1, 0): + raise unittest.SkipTest("UDTs require Cassandra 2.1 or greater") + def test_can_create_udts(self): class User(UserType): age = columns.Integer() From f272723277c62ffd80b9485fb3b92244b1f53c6c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 23 Mar 2015 14:31:28 -0500 Subject: [PATCH 1304/3726] Update mapper doc link in README --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index a0415c3c88..e976f93a66 100644 --- a/README.rst +++ b/README.rst @@ -36,7 +36,7 @@ Object Mapper ------------- cqlengine (originally developed by Blake Eggleston and Jon Haddad, with contributions from the community) is now maintained as an integral part of this package. Refer to -`documentation here `## UPDATE LINK ##. +`documentation here `_. 
Reporting Problems ------------------ From cea17aaf1fe364c8b77e85e7d852bee047cef19d Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Mon, 23 Mar 2015 15:23:34 -0700 Subject: [PATCH 1305/3726] remove obsolete cqlengine tests --- .../columns/test_container_columns.py | 17 --------- .../model/test_class_construction.py | 35 ------------------- .../cqlengine/model/test_validation.py | 15 -------- .../operators/test_assignment_operators.py | 14 -------- .../cqlengine/query/test_updates.py | 20 +++++------ .../cqlengine/statements/test_quoter.py | 14 -------- 6 files changed, 10 insertions(+), 105 deletions(-) delete mode 100644 tests/integration/cqlengine/model/test_validation.py delete mode 100644 tests/integration/cqlengine/operators/test_assignment_operators.py delete mode 100644 tests/integration/cqlengine/statements/test_quoter.py diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index c49b73db72..03a94e0df4 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -511,23 +511,6 @@ def test_default_empty_container_saving(self): m = TestMapModel.get(partition=pkey) self.assertEqual(m.int_map, tmap) -# def test_partial_update_creation(self): -# """ -# Tests that proper update statements are created for a partial list update -# :return: -# """ -# final = range(10) -# initial = final[3:7] -# -# ctx = {} -# col = columns.List(columns.Integer, db_field="TEST") -# statements = col.get_update_statement(final, initial, ctx) -# -# assert len([v for v in ctx.values() if [0,1,2] == v.value]) == 1 -# assert len([v for v in ctx.values() if [7,8,9] == v.value]) == 1 -# assert len([s for s in statements if '"TEST" = "TEST" +' in s]) == 1 -# assert len([s for s in statements if '+ "TEST"' in s]) == 1 - class TestCamelMapModel(Model): diff --git 
a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index a267e596a3..18cda48f44 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -136,31 +136,6 @@ class TestModel(Model): assert TestModel.column_family_name(include_keyspace=False) == 'test_model' - def test_normal_fields_can_be_defined_between_primary_keys(self): - """ - Tests tha non primary key fields can be defined between primary key fields - """ - - def test_at_least_one_non_primary_key_column_is_required(self): - """ - Tests that an error is raised if a model doesn't contain at least one primary key field - """ - - def test_model_keyspace_attribute_must_be_a_string(self): - """ - Tests that users can't set the keyspace to None, or something else - """ - - def test_indexes_arent_allowed_on_models_with_multiple_primary_keys(self): - """ - Tests that attempting to define an index on a model with multiple primary keys fails - """ - - def test_meta_data_is_not_inherited(self): - """ - Test that metadata defined in one class, is not inherited by subclasses - """ - def test_partition_keys(self): """ Test compound partition key definition @@ -378,13 +353,3 @@ class AlreadyLoadedTest(ConcreteModelWithCol): new_field = columns.Integer() self.assertGreater(len(AlreadyLoadedTest()), length) - - - - - - - - - - diff --git a/tests/integration/cqlengine/model/test_validation.py b/tests/integration/cqlengine/model/test_validation.py deleted file mode 100644 index fa957a1416..0000000000 --- a/tests/integration/cqlengine/model/test_validation.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2015 DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - diff --git a/tests/integration/cqlengine/operators/test_assignment_operators.py b/tests/integration/cqlengine/operators/test_assignment_operators.py deleted file mode 100644 index 55181d93fe..0000000000 --- a/tests/integration/cqlengine/operators/test_assignment_operators.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2015 DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index 49c0dc12d6..c7c091150f 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -224,13 +224,13 @@ def test_map_update_none_deletes_key(self): converting None to null (and the cql library is no longer in active developement). 
""" - # partition = uuid4() - # cluster = 1 - # TestQueryUpdateModel.objects.create( - # partition=partition, cluster=cluster, - # text_map={"foo": '1', "bar": '2'}) - # TestQueryUpdateModel.objects( - # partition=partition, cluster=cluster).update( - # text_map__update={"bar": None}) - # obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) - # self.assertEqual(obj.text_map, {"foo": '1'}) + partition = uuid4() + cluster = 1 + TestQueryUpdateModel.objects.create( + partition=partition, cluster=cluster, + text_map={"foo": '1', "bar": '2'}) + TestQueryUpdateModel.objects( + partition=partition, cluster=cluster).update( + text_map__update={"bar": None}) + obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) + self.assertEqual(obj.text_map, {"foo": '1'}) diff --git a/tests/integration/cqlengine/statements/test_quoter.py b/tests/integration/cqlengine/statements/test_quoter.py deleted file mode 100644 index 55181d93fe..0000000000 --- a/tests/integration/cqlengine/statements/test_quoter.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2015 DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- From 1c57b2b32a8fc58ac9b355d8f8679bb77d931e97 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 23 Mar 2015 17:31:36 -0500 Subject: [PATCH 1306/3726] cqle: remove outdated comment about test failing --- tests/integration/cqlengine/query/test_updates.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index c7c091150f..799e74d135 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -219,10 +219,6 @@ def test_map_update_updates(self): def test_map_update_none_deletes_key(self): """ The CQL behavior is if you set a key in a map to null it deletes that key from the map. Test that this works with __update. - - This test fails because of a bug in the cql python library not - converting None to null (and the cql library is no longer in active - developement). """ partition = uuid4() cluster = 1 From 90bf4a3a19fdd7af2805b702fd5d4de84f328c9a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 24 Mar 2015 16:04:21 -0500 Subject: [PATCH 1307/3726] Add new date, time to CQL literal encoding table. 
--- docs/getting_started.rst | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 2d458b3ea8..fa5cda32ce 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -223,8 +223,11 @@ following way: | | ``buffer`` | ``blob`` | | | ``bytearray`` | | +--------------------+-------------------------+ - | | ``date`` | ``timestamp`` | - | | ``datetime`` | | + | ``date`` | ``date`` | + +--------------------+-------------------------+ + | ``datetime`` | ``timestamp`` | + +--------------------+-------------------------+ + | ``time`` | ``time`` | +--------------------+-------------------------+ | | ``list`` | ``list`` | | | ``tuple`` | | From 064dc0b652730716738f9f0541e1cae362ea2108 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 24 Mar 2015 16:04:39 -0500 Subject: [PATCH 1308/3726] Expand code doc for util.Date, util.Time --- cassandra/util.py | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/cassandra/util.py b/cassandra/util.py index 6de2d084cb..35e1e5e40f 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -110,7 +110,6 @@ def uuid_from_time(time_arg, node=None, clock_seq=None): if clock_seq > 0x3fff: raise ValueError('clock_seq is out of range (need a 14-bit value)') - clock_seq_low = clock_seq & 0xff clock_seq_hi_variant = 0x80 | ((clock_seq >> 8) & 0x3f) @@ -824,6 +823,13 @@ class Time(object): nanosecond_time = 0 def __init__(self, value): + """ + Initializer value can be: + + - integer_type: absolute nanoseconds in the day + - datetime.time: built-in time + - string_type: a string time of the form "HH:MM:SS[.mmmuuunnn]" + """ if isinstance(value, six.integer_types): self._from_timestamp(value) elif isinstance(value, datetime.time): @@ -835,20 +841,32 @@ def __init__(self, value): @property def hour(self): + """ + The hour component of this time (0-23) + """ return self.nanosecond_time // Time.HOUR @property def 
minute(self): + """ + The minute component of this time (0-59) + """ minutes = self.nanosecond_time // Time.MINUTE return minutes % 60 @property def second(self): + """ + The second component of this time (0-59) + """ seconds = self.nanosecond_time // Time.SECOND return seconds % 60 @property def nanosecond(self): + """ + The fractional seconds component of the time, in nanoseconds + """ return self.nanosecond_time % Time.SECOND def _from_timestamp(self, t): @@ -915,6 +933,13 @@ class Date(object): days_from_epoch = 0 def __init__(self, value): + """ + Initializer value can be: + + - integer_type: absolute days from epoch (1970, 1, 1). Can be negative. + - datetime.date: built-in date + - string_type: a string time of the form "yyyy-mm-dd" + """ if isinstance(value, six.integer_types): self.days_from_epoch = value elif isinstance(value, (datetime.date, datetime.datetime)): @@ -926,9 +951,17 @@ def __init__(self, value): @property def seconds(self): + """ + Absolute seconds from epoch (can be negative) + """ return self.days_from_epoch * Date.DAY def date(self): + """ + Return a built-in datetime.date for Dates falling in the years [datetime.MINYEAR, datetime.MAXYEAR] + + ValueError is raised for Dates outside this range. 
+ """ try: dt = datetime_from_timestamp(self.seconds) return datetime.date(dt.year, dt.month, dt.day) From 56be4cbac28354a9357e01d2ff09c9460dc4c980 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 30 Mar 2015 14:29:26 -0500 Subject: [PATCH 1309/3726] release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index fa178592eb..fa26054fc2 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (2, 1, 4, 'post') +__version_info__ = (2, 5, 0) __version__ = '.'.join(map(str, __version_info__)) From d15dd85b94f8899ee55d6be3bffaee3ebfe70443 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 30 Mar 2015 15:04:12 -0500 Subject: [PATCH 1310/3726] post-release version update --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index fa26054fc2..296601cbf1 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (2, 5, 0) +__version_info__ = (2, 5, 0, 'post') __version__ = '.'.join(map(str, __version_info__)) From 7a6e72d79177ed3ae8a551bca488dacdf098abb9 Mon Sep 17 00:00:00 2001 From: Stefania Alborghetti Date: Fri, 20 Mar 2015 11:35:13 +0800 Subject: [PATCH 1311/3726] CASSANDRA-7814: Added indexes to table and ks metadata and export_as_string() to IndexMetadata --- cassandra/metadata.py | 43 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 4 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 1f88d8f9e6..9b01a18c8e 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -122,10 +122,9 @@ def rebuild_schema(self, ks_results, type_results, cf_results, col_results, trig 
keyspace_col_rows = col_def_rows.get(keyspace_meta.name, {}) keyspace_trigger_rows = trigger_rows.get(keyspace_meta.name, {}) for table_row in cf_def_rows.get(keyspace_meta.name, []): - table_meta = self._build_table_metadata( + self._add_table_metadata_to_ks( keyspace_meta, table_row, keyspace_col_rows, keyspace_trigger_rows) - keyspace_meta.tables[table_meta.name] = table_meta for usertype_row in usertype_rows.get(keyspace_meta.name, []): usertype = self._build_usertype(keyspace_meta.name, usertype_row) @@ -184,10 +183,12 @@ def table_changed(self, keyspace, table, cf_results, col_results, triggers_resul if not cf_results: # the table was removed - keyspace_meta.tables.pop(table, None) + table_meta = keyspace_meta.tables.pop(table, None) + if table_meta: + self._clear_table_indexes_in_ks(keyspace_meta, table_meta.name) else: assert len(cf_results) == 1 - keyspace_meta.tables[table] = self._build_table_metadata( + self._add_table_metadata_to_ks( keyspace_meta, cf_results[0], {table: col_results}, {table: triggers_result}) @@ -215,6 +216,20 @@ def _build_usertype(self, keyspace, usertype_row): return UserType(usertype_row['keyspace_name'], usertype_row['type_name'], usertype_row['field_names'], type_classes) + def _add_table_metadata_to_ks(self, keyspace_metadata, row, col_rows, trigger_rows): + self._clear_table_indexes_in_ks(keyspace_metadata, row["columnfamily_name"]) + + table_metadata = self._build_table_metadata(keyspace_metadata, row, col_rows, trigger_rows) + keyspace_metadata.tables[table_metadata.name] = table_metadata + for index_name, index_metadata in table_metadata.indexes.iteritems(): + keyspace_metadata.indexes[index_name] = index_metadata + + def _clear_table_indexes_in_ks(self, keyspace_metadata, table_name): + if table_name in keyspace_metadata.tables: + table_meta = keyspace_metadata.tables[table_name] + for index_name in table_meta.indexes: + keyspace_metadata.indexes.pop(index_name, None) + def _build_table_metadata(self, keyspace_metadata, 
row, col_rows, trigger_rows): cfname = row["columnfamily_name"] cf_col_rows = col_rows.get(cfname, []) @@ -385,6 +400,8 @@ def _build_column_metadata(self, table_metadata, row): column_meta = ColumnMetadata(table_metadata, name, data_type, is_static=is_static) index_meta = self._build_index_metadata(column_meta, row) column_meta.index = index_meta + if index_meta: + table_metadata.indexes[index_meta.name] = index_meta return column_meta def _build_index_metadata(self, column_metadata, row): @@ -733,6 +750,11 @@ class KeyspaceMetadata(object): A map from table names to instances of :class:`~.TableMetadata`. """ + indexes = None + """ + A dict mapping index names to :class:`.IndexMetadata` instances. + """ + user_types = None """ A map from user-defined type names to instances of :class:`~cassandra.metadata..UserType`. @@ -745,6 +767,7 @@ def __init__(self, name, durable_writes, strategy_class, strategy_options): self.durable_writes = durable_writes self.replication_strategy = ReplicationStrategy.create(strategy_class, strategy_options) self.tables = {} + self.indexes = {} self.user_types = {} def export_as_string(self): @@ -884,6 +907,11 @@ def primary_key(self): A dict mapping column names to :class:`.ColumnMetadata` instances. """ + indexes = None + """ + A dict mapping index names to :class:`.IndexMetadata` instances. 
+ """ + is_compact_storage = False options = None @@ -945,6 +973,7 @@ def __init__(self, keyspace_metadata, name, partition_key=None, clustering_key=N self.partition_key = [] if partition_key is None else partition_key self.clustering_key = [] if clustering_key is None else clustering_key self.columns = OrderedDict() if columns is None else columns + self.indexes = {} self.options = options self.comparator = None self.triggers = OrderedDict() if triggers is None else triggers @@ -1242,6 +1271,12 @@ def as_cql_query(self): protect_name(self.column.name), self.index_options["class_name"]) + def export_as_string(self): + """ + Returns a CQL query string that can be used to recreate this index. + """ + return self.as_cql_query() + ';' + class TokenMap(object): """ From 5dcc569c98a75c1746ea4adcb8c338e7c984540c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Apr 2015 11:12:42 -0500 Subject: [PATCH 1312/3726] Better documentation around timeouts and retries PYTHON-74 --- cassandra/cluster.py | 25 ++++++++++++++++++------- cassandra/policies.py | 9 +++++++-- 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index e523e38162..9aa5912b64 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1259,10 +1259,8 @@ class Session(object): Setting this to :const:`None` will cause no timeouts to be set by default. - **Important**: This timeout currently has no effect on callbacks registered - on a :class:`~.ResponseFuture` through :meth:`.ResponseFuture.add_callback` or - :meth:`.ResponseFuture.add_errback`; even if a query exceeds this default - timeout, neither the registered callback or errback will be called. + Please see :meth:`.ResponseFuture.result` for details on the scope and + effect of this timeout. .. 
versionadded:: 2.0.0 """ @@ -1381,7 +1379,8 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False): which an :exc:`.OperationTimedOut` exception will be raised if the query has not completed. If not set, the timeout defaults to :attr:`~.Session.default_timeout`. If set to :const:`None`, there is - no timeout. + no timeout. Please see :meth:`.ResponseFuture.result` for details on + the scope and effect of this timeout. If `trace` is set to :const:`True`, an attempt will be made to fetch the trace details and attach them to the `query`'s @@ -2937,8 +2936,20 @@ def result(self, timeout=_NOT_SET): You may set a timeout (in seconds) with the `timeout` parameter. By default, the :attr:`~.default_timeout` for the :class:`.Session` this was created through will be used for the timeout on this - operation. If the timeout is exceeded, an - :exc:`cassandra.OperationTimedOut` will be raised. + operation. + + This timeout applies to the entire request, including any retries + (decided internally by the :class:`.policies.RetryPolicy` used with + the request). + + If the timeout is exceeded, an :exc:`cassandra.OperationTimedOut` will be raised. + This is a client-side timeout. For more information + about server-side coordinator timeouts, see :class:`.policies.RetryPolicy`. + + **Important**: This timeout currently has no effect on callbacks registered + on a :class:`~.ResponseFuture` through :meth:`.ResponseFuture.add_callback` or + :meth:`.ResponseFuture.add_errback`; even if a query exceeds this default + timeout, neither the registered callback or errback will be called. Example usage:: diff --git a/cassandra/policies.py b/cassandra/policies.py index 0dc36af76d..9eb76c16d9 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -597,8 +597,13 @@ class WriteType(object): class RetryPolicy(object): """ - A policy that describes whether to retry, rethrow, or ignore timeout - and unavailable failures. 
+ A policy that describes whether to retry, rethrow, or ignore coordinator + timeout and unavailable failures. These are failures reported from the + server side. Timeouts are configured by + `settings in cassandra.yaml `_. + Unavailable failures occur when the coordinator cannot acheive the consistency + level for a request. For further information see the method descriptions + below. To specify a default retry policy, set the :attr:`.Cluster.default_retry_policy` attribute to an instance of this From 521ab1a116f25bbb34a33b278c897eec03018454 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Thu, 2 Apr 2015 20:49:48 -0700 Subject: [PATCH 1313/3726] refactored MetricsTests --- tests/integration/standard/test_metrics.py | 125 +++++++++++---------- 1 file changed, 64 insertions(+), 61 deletions(-) diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 674727eae6..1996495c0b 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -35,27 +35,28 @@ class MetricsTests(unittest.TestCase): def test_connection_error(self): """ Trigger and ensure connection_errors are counted + Stop all node with the driver knowing about the "DOWN" states. 
""" - cluster = Cluster(metrics_enabled=True, - protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - session.execute("USE test3rf") + cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION) + session = cluster.connect("test3rf") # Test writes for i in range(0, 100): - session.execute_async( - """ - INSERT INTO test3rf.test (k, v) VALUES (%s, %s) - """ % (i, i)) + session.execute_async("INSERT INTO test (k, v) VALUES ({0}, {1})".format(i, i)) - # Force kill cluster + # Stop the cluster get_cluster().stop(wait=True, gently=False) + try: # Ensure the nodes are actually down - self.assertRaises(NoHostAvailable, session.execute, "USE test3rf") + query = SimpleStatement("SELECT * FROM test", consistency_level=ConsistencyLevel.ALL) + with self.assertRaises(NoHostAvailable): + session.execute(query) finally: get_cluster().start(wait_for_binary_proto=True, wait_other_notice=True) + # Give some time for the cluster to come back up, for the next test + time.sleep(5) self.assertGreater(cluster.metrics.stats.connection_errors, 0) cluster.shutdown() @@ -63,115 +64,117 @@ def test_connection_error(self): def test_write_timeout(self): """ Trigger and ensure write_timeouts are counted - Write a key, value pair. Force kill a node without waiting for the cluster to register the death. + Write a key, value pair. Pause a node without the coordinator node knowing about the "DOWN" state. Attempt a write at cl.ALL and receive a WriteTimeout. 
""" - cluster = Cluster(metrics_enabled=True, - protocol_version=PROTOCOL_VERSION) - session = cluster.connect() + cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION) + session = cluster.connect("test3rf") # Test write - session.execute("INSERT INTO test3rf.test (k, v) VALUES (1, 1)") + session.execute("INSERT INTO test (k, v) VALUES (1, 1)") # Assert read - query = SimpleStatement("SELECT v FROM test3rf.test WHERE k=%(k)s", consistency_level=ConsistencyLevel.ALL) - results = session.execute(query, {'k': 1}) - self.assertEqual(1, results[0].v) + query = SimpleStatement("SELECT * FROM test WHERE k=1", consistency_level=ConsistencyLevel.ALL) + results = session.execute(query) + self.assertEqual(1, len(results)) - # Force kill ccm node - get_node(1).stop(wait=False, gently=False) + # Pause node so it shows as unreachable to coordinator + get_node(1).pause() try: # Test write - query = SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (2, 2)", consistency_level=ConsistencyLevel.ALL) - self.assertRaises(WriteTimeout, session.execute, query, timeout=None) + query = SimpleStatement("INSERT INTO test (k, v) VALUES (2, 2)", consistency_level=ConsistencyLevel.ALL) + with self.assertRaises(WriteTimeout): + session.execute(query) self.assertEqual(1, cluster.metrics.stats.write_timeouts) finally: - get_node(1).start(wait_other_notice=True, wait_for_binary_proto=True) + get_node(1).resume() cluster.shutdown() def test_read_timeout(self): """ Trigger and ensure read_timeouts are counted - Write a key, value pair. Force kill a node without waiting for the cluster to register the death. + Write a key, value pair. Pause a node without the coordinator node knowing about the "DOWN" state. Attempt a read at cl.ALL and receive a ReadTimeout. 
""" - cluster = Cluster(metrics_enabled=True, - protocol_version=PROTOCOL_VERSION) - session = cluster.connect() + cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION) + session = cluster.connect("test3rf") # Test write - session.execute("INSERT INTO test3rf.test (k, v) VALUES (1, 1)") + session.execute("INSERT INTO test (k, v) VALUES (1, 1)") # Assert read - query = SimpleStatement("SELECT v FROM test3rf.test WHERE k=%(k)s", consistency_level=ConsistencyLevel.ALL) - results = session.execute(query, {'k': 1}) - self.assertEqual(1, results[0].v) + query = SimpleStatement("SELECT * FROM test WHERE k=1", consistency_level=ConsistencyLevel.ALL) + results = session.execute(query) + self.assertEqual(1, len(results)) - # Force kill ccm node - get_node(1).stop(wait=False, gently=False) + # Pause node so it shows as unreachable to coordinator + get_node(1).pause() try: # Test read - query = SimpleStatement("SELECT v FROM test3rf.test WHERE k=%(k)s", consistency_level=ConsistencyLevel.ALL) - self.assertRaises(ReadTimeout, session.execute, query, {'k': 1}, timeout=None) + query = SimpleStatement("SELECT * FROM test", consistency_level=ConsistencyLevel.ALL) + with self.assertRaises(ReadTimeout): + session.execute(query, timeout=None) self.assertEqual(1, cluster.metrics.stats.read_timeouts) finally: - get_node(1).start(wait_other_notice=True, wait_for_binary_proto=True) + get_node(1).resume() cluster.shutdown() def test_unavailable(self): """ Trigger and ensure unavailables are counted - Write a key, value pair. Kill a node while waiting for the cluster to register the death. + Write a key, value pair. Stop a node with the coordinator node knowing about the "DOWN" state. Attempt an insert/read at cl.ALL and receive a Unavailable Exception. 
""" - cluster = Cluster(metrics_enabled=True, - protocol_version=PROTOCOL_VERSION) - session = cluster.connect() + cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION) + session = cluster.connect("test3rf") # Test write - session.execute("INSERT INTO test3rf.test (k, v) VALUES (1, 1)") + session.execute("INSERT INTO test (k, v) VALUES (1, 1)") # Assert read - query = SimpleStatement("SELECT v FROM test3rf.test WHERE k=%(k)s", consistency_level=ConsistencyLevel.ALL) - results = session.execute(query, {'k': 1}) - self.assertEqual(1, results[0].v) + query = SimpleStatement("SELECT * FROM test WHERE k=1", consistency_level=ConsistencyLevel.ALL) + results = session.execute(query) + self.assertEqual(1, len(results)) - # Force kill ccm node + # Stop node gracefully get_node(1).stop(wait=True, gently=True) - time.sleep(5) try: # Test write - query = SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (2, 2)", consistency_level=ConsistencyLevel.ALL) - self.assertRaises(Unavailable, session.execute, query) + query = SimpleStatement("INSERT INTO test (k, v) VALUES (2, 2)", consistency_level=ConsistencyLevel.ALL) + with self.assertRaises(Unavailable): + session.execute(query) self.assertEqual(1, cluster.metrics.stats.unavailables) # Test write - query = SimpleStatement("SELECT v FROM test3rf.test WHERE k=%(k)s", consistency_level=ConsistencyLevel.ALL) - self.assertRaises(Unavailable, session.execute, query, {'k': 1}) + query = SimpleStatement("SELECT * FROM test", consistency_level=ConsistencyLevel.ALL) + with self.assertRaises(Unavailable): + session.execute(query, timeout=None) self.assertEqual(2, cluster.metrics.stats.unavailables) finally: get_node(1).start(wait_other_notice=True, wait_for_binary_proto=True) + # Give some time for the cluster to come back up, for the next test + time.sleep(5) cluster.shutdown() - def test_other_error(self): - # TODO: Bootstrapping or Overloaded cases - pass - - def test_ignore(self): - # TODO: Look for ways to 
generate ignores - pass - - def test_retry(self): - # TODO: Look for ways to generate retries - pass + # def test_other_error(self): + # # TODO: Bootstrapping or Overloaded cases + # pass + # + # def test_ignore(self): + # # TODO: Look for ways to generate ignores + # pass + # + # def test_retry(self): + # # TODO: Look for ways to generate retries + # pass From 73ec60f606d793200b360bcac314aecaec3bf9b9 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Tue, 7 Apr 2015 13:27:16 -0700 Subject: [PATCH 1314/3726] Refactored TypeTests --- tests/integration/datatype_utils.py | 75 +-- tests/integration/standard/test_types.py | 685 +++++++++++------------ 2 files changed, 369 insertions(+), 391 deletions(-) diff --git a/tests/integration/datatype_utils.py b/tests/integration/datatype_utils.py index 41a4c09e01..bd76c36fec 100644 --- a/tests/integration/datatype_utils.py +++ b/tests/integration/datatype_utils.py @@ -11,22 +11,26 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + from decimal import Decimal -import datetime -from uuid import UUID -import pytz +from datetime import datetime, date, time +from uuid import uuid1, uuid4 try: from blist import sortedset except ImportError: sortedset = set # noqa -DATA_TYPE_PRIMITIVES = [ +from cassandra.util import OrderedMap + +from tests.integration import get_server_versions + + +PRIMITIVE_DATATYPES = [ 'ascii', 'bigint', 'blob', 'boolean', - # 'counter', counters are not allowed inside tuples 'decimal', 'double', 'float', @@ -40,22 +44,28 @@ 'varint', ] -DATA_TYPE_NON_PRIMITIVE_NAMES = [ +COLLECTION_TYPES = [ 'list', 'set', 'map', - 'tuple' ] -def get_sample_data(): - """ - Create a standard set of sample inputs for testing. 
- """ +def update_datatypes(): + _cass_version, _cql_version = get_server_versions() + + if _cass_version >= (2, 1, 0): + COLLECTION_TYPES.append('tuple') + + if _cass_version >= (2, 1, 5): + PRIMITIVE_DATATYPES.append('date') + PRIMITIVE_DATATYPES.append('time') + +def get_sample_data(): sample_data = {} - for datatype in DATA_TYPE_PRIMITIVES: + for datatype in PRIMITIVE_DATATYPES: if datatype == 'ascii': sample_data[datatype] = 'ascii' @@ -68,10 +78,6 @@ def get_sample_data(): elif datatype == 'boolean': sample_data[datatype] = True - elif datatype == 'counter': - # Not supported in an insert statement - pass - elif datatype == 'decimal': sample_data[datatype] = Decimal('12.3E+7') @@ -91,13 +97,13 @@ def get_sample_data(): sample_data[datatype] = 'text' elif datatype == 'timestamp': - sample_data[datatype] = datetime.datetime.fromtimestamp(872835240, tz=pytz.timezone('America/New_York')).astimezone(pytz.UTC).replace(tzinfo=None) + sample_data[datatype] = datetime(2013, 12, 31, 23, 59, 59, 999000) elif datatype == 'timeuuid': - sample_data[datatype] = UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66') + sample_data[datatype] = uuid1() elif datatype == 'uuid': - sample_data[datatype] = UUID('067e6162-3b6f-4ae2-a171-2470b63dff00') + sample_data[datatype] = uuid4() elif datatype == 'varchar': sample_data[datatype] = 'varchar' @@ -105,35 +111,40 @@ def get_sample_data(): elif datatype == 'varint': sample_data[datatype] = int(str(2147483647) + '000') + elif datatype == 'date': + sample_data[datatype] = date(2015, 1, 15) + + elif datatype == 'time': + sample_data[datatype] = time(16, 47, 25, 7) + else: - raise Exception('Missing handling of %s.' 
% datatype) + raise Exception("Missing handling of {0}".format(datatype)) return sample_data SAMPLE_DATA = get_sample_data() + def get_sample(datatype): """ - Helper method to access created sample data + Helper method to access created sample data for primitive types """ return SAMPLE_DATA[datatype] -def get_nonprim_sample(non_prim_type, datatype): + +def get_collection_sample(collection_type, datatype): """ - Helper method to access created sample data for non-primitives + Helper method to access created sample data for collection types """ - if non_prim_type == 'list': + if collection_type == 'list': return [get_sample(datatype), get_sample(datatype)] - elif non_prim_type == 'set': + elif collection_type == 'set': return sortedset([get_sample(datatype)]) - elif non_prim_type == 'map': - if datatype == 'blob': - return {get_sample('ascii'): get_sample(datatype)} - else: - return {get_sample(datatype): get_sample(datatype)} - elif non_prim_type == 'tuple': + elif collection_type == 'map': + return OrderedMap([(get_sample(datatype), get_sample(datatype))]) + elif collection_type == 'tuple': return (get_sample(datatype),) else: - raise Exception('Missing handling of non-primitive type {0}.'.format(non_prim_type)) + raise Exception('Missing handling of non-primitive type {0}.'.format(collection_type)) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 2191db1f41..b256e87595 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from tests.integration.datatype_utils import get_sample, DATA_TYPE_PRIMITIVES, DATA_TYPE_NON_PRIMITIVE_NAMES try: import unittest2 as unittest @@ -21,85 +20,53 @@ import logging log = logging.getLogger(__name__) -from collections import namedtuple -from decimal import Decimal -from datetime import datetime, date, time -from functools import partial +from datetime import datetime import six -from uuid import uuid1, uuid4 from cassandra import InvalidRequest from cassandra.cluster import Cluster from cassandra.cqltypes import Int32Type, EMPTY -from cassandra.query import dict_factory -from cassandra.util import OrderedMap, sortedset +from cassandra.query import dict_factory, ordered_dict_factory +from cassandra.util import sortedset from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION - -# defined in module scope for pickling in OrderedMap -nested_collection_udt = namedtuple('nested_collection_udt', ['m', 't', 'l', 's']) -nested_collection_udt_nested = namedtuple('nested_collection_udt_nested', ['m', 't', 'l', 's', 'u']) +from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, \ + get_sample, get_collection_sample def setup_module(): use_singledc() + update_datatypes() class TypeTests(unittest.TestCase): - _types_table_created = False - - @classmethod - def setup_class(cls): - cls._cass_version, cls._cql_version = get_server_versions() - - cls._col_types = ['text', - 'ascii', - 'bigint', - 'boolean', - 'decimal', - 'double', - 'float', - 'inet', - 'int', - 'list', - 'set', - 'map', - 'timestamp', - 'uuid', - 'timeuuid', - 'varchar', - 'varint'] - - if cls._cass_version >= (2, 1, 4): - cls._col_types.extend(('date', 'time')) - - cls._session = Cluster(protocol_version=PROTOCOL_VERSION).connect() - cls._session.execute("CREATE KEYSPACE typetests WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}") - cls._session.set_keyspace("typetests") - - @classmethod - def 
teardown_class(cls): - cls._session.execute("DROP KEYSPACE typetests") - cls._session.cluster.shutdown() - - def test_blob_type_as_string(self): - s = self._session + def setUp(self): + self._cass_version, self._cql_version = get_server_versions() - s.execute(""" - CREATE TABLE blobstring ( - a ascii, - b blob, - PRIMARY KEY (a) - ) - """) + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + self.session.execute("CREATE KEYSPACE typetests WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}") + self.cluster.shutdown() + + def tearDown(self): + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + self.session.execute("DROP KEYSPACE typetests") + self.cluster.shutdown() + + def test_can_insert_blob_type_as_string(self): + """ + Tests that blob type in Cassandra does not map to string in Python + """ - params = [ - 'key1', - b'blobyblob' - ] + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("typetests") - query = 'INSERT INTO blobstring (a, b) VALUES (%s, %s)' + s.execute("CREATE TABLE blobstring (a ascii PRIMARY KEY, b blob)") + + params = ['key1', b'blobyblob'] + query = "INSERT INTO blobstring (a, b) VALUES (%s, %s)" # In python 3, the 'bytes' type is treated as a blob, so we can # correctly encode it with hex notation. 
@@ -118,243 +85,277 @@ def test_blob_type_as_string(self): params[1] = params[1].encode('hex') s.execute(query, params) - expected_vals = [ - 'key1', - bytearray(b'blobyblob') - ] - - results = s.execute("SELECT * FROM blobstring") - for expected, actual in zip(expected_vals, results[0]): + results = s.execute("SELECT * FROM blobstring")[0] + for expected, actual in zip(params, results): self.assertEqual(expected, actual) - def test_blob_type_as_bytearray(self): - s = self._session - s.execute(""" - CREATE TABLE blobbytes ( - a ascii, - b blob, - PRIMARY KEY (a) - ) - """) - - params = [ - 'key1', - bytearray(b'blob1') - ] - - query = 'INSERT INTO blobbytes (a, b) VALUES (%s, %s);' - s.execute(query, params) + c.shutdown() - expected_vals = [ - 'key1', - bytearray(b'blob1') - ] + def test_can_insert_blob_type_as_bytearray(self): + """ + Tests that blob type in Cassandra maps to bytearray in Python + """ + + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("typetests") + + s.execute("CREATE TABLE blobbytes (a ascii PRIMARY KEY, b blob)") - results = s.execute("SELECT * FROM blobbytes") + params = ['key1', bytearray(b'blob1')] + s.execute("INSERT INTO blobbytes (a, b) VALUES (%s, %s)", params) - for expected, actual in zip(expected_vals, results[0]): + results = s.execute("SELECT * FROM blobbytes")[0] + for expected, actual in zip(params, results): self.assertEqual(expected, actual) - def _create_all_types_table(self): - if not self._types_table_created: - TypeTests._col_names = ["%s_col" % col_type.translate(None, '<> ,') for col_type in self._col_types] - cql = "CREATE TABLE alltypes ( key int PRIMARY KEY, %s)" % ','.join("%s %s" % name_type for name_type in zip(self._col_names, self._col_types)) - self._session.execute(cql) - TypeTests._types_table_created = True - - def test_basic_types(self): - - s = self._session.cluster.connect() - s.set_keyspace(self._session.keyspace) - - self._create_all_types_table() - - v1_uuid = uuid1() - v4_uuid = uuid4() 
- mydatetime = datetime(2013, 12, 31, 23, 59, 59, 999000) - - # this could use some rework tying column types to names (instead of relying on position) - params = [ - "text", - "ascii", - 12345678923456789, # bigint - True, # boolean - Decimal('1.234567890123456789'), # decimal - 0.000244140625, # double - 1.25, # float - "1.2.3.4", # inet - 12345, # int - ['a', 'b', 'c'], # list collection - set([1, 2, 3]), # set collection - {'a': 1, 'b': 2}, # map collection - mydatetime, # timestamp - v4_uuid, # uuid - v1_uuid, # timeuuid - u"sometext\u1234", # varchar - 123456789123456789123456789, # varint - ] - - expected_vals = [ - "text", - "ascii", - 12345678923456789, # bigint - True, # boolean - Decimal('1.234567890123456789'), # decimal - 0.000244140625, # double - 1.25, # float - "1.2.3.4", # inet - 12345, # int - ['a', 'b', 'c'], # list collection - sortedset((1, 2, 3)), # set collection - {'a': 1, 'b': 2}, # map collection - mydatetime, # timestamp - v4_uuid, # uuid - v1_uuid, # timeuuid - u"sometext\u1234", # varchar - 123456789123456789123456789, # varint - ] - - if self._cass_version >= (2, 1, 4): - mydate = date(2015, 1, 15) - mytime = time(16, 47, 25, 7) - - params.append(mydate) - params.append(mytime) - - expected_vals.append(mydate) - expected_vals.append(mytime) - - columns_string = ','.join(self._col_names) - placeholders = ', '.join(["%s"] * len(self._col_names)) - s.execute("INSERT INTO alltypes (key, %s) VALUES (0, %s)" % - (columns_string, placeholders), params) - - results = s.execute("SELECT %s FROM alltypes WHERE key=0" % columns_string) - - for expected, actual in zip(expected_vals, results[0]): + c.shutdown() + + def test_can_insert_primitive_datatypes(self): + """ + Test insertion of all datatype primitives + """ + + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("typetests") + + # create table + alpha_type_list = ["zz int PRIMARY KEY"] + col_names = ["zz"] + start_index = ord('a') + for i, datatype in 
enumerate(PRIMITIVE_DATATYPES): + alpha_type_list.append("{0} {1}".format(chr(start_index + i), datatype)) + col_names.append(chr(start_index + i)) + + s.execute("CREATE TABLE alltypes ({0})".format(', '.join(alpha_type_list))) + + # create the input + params = [0] + for datatype in PRIMITIVE_DATATYPES: + params.append((get_sample(datatype))) + + # insert into table as a simple statement + columns_string = ', '.join(col_names) + placeholders = ', '.join(["%s"] * len(col_names)) + s.execute("INSERT INTO alltypes ({0}) VALUES ({1})".format(columns_string, placeholders), params) + + # verify data + results = s.execute("SELECT {0} FROM alltypes WHERE zz=0".format(columns_string))[0] + for expected, actual in zip(params, results): self.assertEqual(actual, expected) # try the same thing with a prepared statement - placeholders = ','.join(["?"] * len(self._col_names)) - prepared = s.prepare("INSERT INTO alltypes (key, %s) VALUES (1, %s)" % - (columns_string, placeholders)) - s.execute(prepared.bind(params)) + placeholders = ','.join(["?"] * len(col_names)) + insert = s.prepare("INSERT INTO alltypes ({0}) VALUES ({1})".format(columns_string, placeholders)) + s.execute(insert.bind(params)) + + # verify data + results = s.execute("SELECT {0} FROM alltypes WHERE zz=0".format(columns_string))[0] + for expected, actual in zip(params, results): + self.assertEqual(actual, expected) + + # verify data with prepared statement query + select = s.prepare("SELECT {0} FROM alltypes WHERE zz=?".format(columns_string)) + results = s.execute(select.bind([0]))[0] + for expected, actual in zip(params, results): + self.assertEqual(actual, expected) - results = s.execute("SELECT %s FROM alltypes WHERE key=1" % columns_string) + # verify data with with prepared statement, use dictionary with no explicit columns + s.row_factory = ordered_dict_factory + select = s.prepare("SELECT * FROM alltypes") + results = s.execute(select)[0] - for expected, actual in zip(expected_vals, results[0]): + for 
expected, actual in zip(params, results.values()): self.assertEqual(actual, expected) - # query with prepared statement - prepared = s.prepare("SELECT %s FROM alltypes WHERE key=?" % columns_string) - results = s.execute(prepared.bind((1,))) + c.shutdown() + + def test_can_insert_collection_datatypes(self): + """ + Test insertion of all collection types + """ - for expected, actual in zip(expected_vals, results[0]): + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("typetests") + # use tuple encoding, to convert native python tuple into raw CQL + s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple + + # create table + alpha_type_list = ["zz int PRIMARY KEY"] + col_names = ["zz"] + start_index = ord('a') + for i, collection_type in enumerate(COLLECTION_TYPES): + for j, datatype in enumerate(PRIMITIVE_DATATYPES): + if collection_type == "map": + type_string = "{0}_{1} {2}<{3}, {3}>".format(chr(start_index + i), chr(start_index + j), + collection_type, datatype) + elif collection_type == "tuple": + type_string = "{0}_{1} frozen<{2}<{3}>>".format(chr(start_index + i), chr(start_index + j), + collection_type, datatype) + else: + type_string = "{0}_{1} {2}<{3}>".format(chr(start_index + i), chr(start_index + j), + collection_type, datatype) + alpha_type_list.append(type_string) + col_names.append("{0}_{1}".format(chr(start_index + i), chr(start_index + j))) + + s.execute("CREATE TABLE allcoltypes ({0})".format(', '.join(alpha_type_list))) + columns_string = ', '.join(col_names) + + # create the input for simple statement + params = [0] + for collection_type in COLLECTION_TYPES: + for datatype in PRIMITIVE_DATATYPES: + params.append((get_collection_sample(collection_type, datatype))) + + # insert into table as a simple statement + placeholders = ', '.join(["%s"] * len(col_names)) + s.execute("INSERT INTO allcoltypes ({0}) VALUES ({1})".format(columns_string, placeholders), params) + + # verify data + results = s.execute("SELECT {0} FROM allcoltypes WHERE 
zz=0".format(columns_string))[0] + for expected, actual in zip(params, results): self.assertEqual(actual, expected) - # query with prepared statement, no explicit columns - s.row_factory = dict_factory - prepared = s.prepare("SELECT * FROM alltypes") - results = s.execute(prepared.bind(())) + # create the input for prepared statement + params = [0] + for collection_type in COLLECTION_TYPES: + for datatype in PRIMITIVE_DATATYPES: + params.append((get_collection_sample(collection_type, datatype))) - row = results[0] - for expected, name in zip(expected_vals, self._col_names): - self.assertEqual(row[name], expected) + # try the same thing with a prepared statement + placeholders = ','.join(["?"] * len(col_names)) + insert = s.prepare("INSERT INTO allcoltypes ({0}) VALUES ({1})".format(columns_string, placeholders)) + s.execute(insert.bind(params)) - s.shutdown() + # verify data + results = s.execute("SELECT {0} FROM allcoltypes WHERE zz=0".format(columns_string))[0] + for expected, actual in zip(params, results): + self.assertEqual(actual, expected) - def test_empty_strings_and_nones(self): - s = self._session.cluster.connect() - s.set_keyspace(self._session.keyspace) - s.row_factory = dict_factory + # verify data with prepared statement query + select = s.prepare("SELECT {0} FROM allcoltypes WHERE zz=?".format(columns_string)) + results = s.execute(select.bind([0]))[0] + for expected, actual in zip(params, results): + self.assertEqual(actual, expected) + + # verify data with with prepared statement, use dictionary with no explicit columns + s.row_factory = ordered_dict_factory + select = s.prepare("SELECT * FROM allcoltypes") + results = s.execute(select)[0] - self._create_all_types_table() + for expected, actual in zip(params, results.values()): + self.assertEqual(actual, expected) - columns_string = ','.join(self._col_names) - s.execute("INSERT INTO alltypes (key) VALUES (2)") - results = s.execute("SELECT %s FROM alltypes WHERE key=2" % columns_string) - 
self.assertTrue(all(x is None for x in results[0].values())) + c.shutdown() - prepared = s.prepare("SELECT %s FROM alltypes WHERE key=?" % columns_string) - results = s.execute(prepared.bind((2,))) - self.assertTrue(all(x is None for x in results[0].values())) + def test_can_insert_empty_strings_and_nulls(self): + """ + Test insertion of empty strings and null values + """ - # insert empty strings for string-like fields and fetch them - expected_values = {'text_col': '', 'ascii_col': '', 'varchar_col': '', 'listtext_col': [''], 'maptextint_col': OrderedMap({'': 3})} + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("typetests") + + # create table + alpha_type_list = ["zz int PRIMARY KEY"] + col_names = [] + start_index = ord('a') + for i, datatype in enumerate(PRIMITIVE_DATATYPES): + alpha_type_list.append("{0} {1}".format(chr(start_index + i), datatype)) + col_names.append(chr(start_index + i)) + + s.execute("CREATE TABLE alltypes ({0})".format(', '.join(alpha_type_list))) + + # verify all types initially null with simple statement + columns_string = ','.join(col_names) + s.execute("INSERT INTO alltypes (zz) VALUES (2)") + results = s.execute("SELECT {0} FROM alltypes WHERE zz=2".format(columns_string))[0] + self.assertTrue(all(x is None for x in results)) + + # verify all types initially null with prepared statement + select = s.prepare("SELECT {0} FROM alltypes WHERE zz=?".format(columns_string)) + results = s.execute(select.bind([2]))[0] + self.assertTrue(all(x is None for x in results)) + + # insert empty strings for string-like fields + expected_values = {'j': '', 'a': '', 'n': ''} columns_string = ','.join(expected_values.keys()) placeholders = ','.join(["%s"] * len(expected_values)) - s.execute("INSERT INTO alltypes (key, %s) VALUES (3, %s)" % (columns_string, placeholders), expected_values.values()) - self.assertEqual(expected_values, - s.execute("SELECT %s FROM alltypes WHERE key=3" % columns_string)[0]) - self.assertEqual(expected_values, 
- s.execute(s.prepare("SELECT %s FROM alltypes WHERE key=?" % columns_string), (3,))[0]) + s.execute("INSERT INTO alltypes (zz, {0}) VALUES (3, {1})".format(columns_string, placeholders), expected_values.values()) + + # verify string types empty with simple statement + results = s.execute("SELECT {0} FROM alltypes WHERE zz=3".format(columns_string))[0] + for expected, actual in zip(expected_values.values(), results): + self.assertEqual(actual, expected) + + # verify string types empty with prepared statement + results = s.execute(s.prepare("SELECT {0} FROM alltypes WHERE zz=?".format(columns_string)), [3])[0] + for expected, actual in zip(expected_values.values(), results): + self.assertEqual(actual, expected) # non-string types shouldn't accept empty strings - for col in ('bigint_col', 'boolean_col', 'decimal_col', 'double_col', - 'float_col', 'int_col', 'listtext_col', 'setint_col', - 'maptextint_col', 'uuid_col', 'timeuuid_col', 'varint_col'): - query = "INSERT INTO alltypes (key, %s) VALUES (4, %%s)" % col - try: + for col in ('b', 'd', 'e', 'f', 'g', 'i', 'l', 'm', 'o'): + query = "INSERT INTO alltypes (zz, {0}) VALUES (4, %s)".format(col) + with self.assertRaises(InvalidRequest): s.execute(query, ['']) - except InvalidRequest: - pass - else: - self.fail("Expected an InvalidRequest error when inserting an " - "emptry string for column %s" % (col, )) - - prepared = s.prepare("INSERT INTO alltypes (key, %s) VALUES (4, ?)" % col) - try: - s.execute(prepared, ['']) - except TypeError: - pass - else: - self.fail("Expected an InvalidRequest error when inserting an " - "emptry string for column %s with a prepared statement" % (col, )) - - # insert values for all columns - values = ['text', 'ascii', 1, True, Decimal('1.0'), 0.1, 0.1, - "1.2.3.4", 1, ['a'], set([1]), {'a': 1}, - datetime.now(), uuid4(), uuid1(), 'a', 1] - if self._cass_version >= (2, 1, 4): - values.append('2014-01-01') - values.append('01:02:03.456789012') - - columns_string = 
','.join(self._col_names) - placeholders = ','.join(["%s"] * len(self._col_names)) - insert = "INSERT INTO alltypes (key, %s) VALUES (5, %s)" % (columns_string, placeholders) - s.execute(insert, values) + + insert = s.prepare("INSERT INTO alltypes (zz, {0}) VALUES (4, ?)".format(col)) + with self.assertRaises(TypeError): + s.execute(insert, ['']) + + # verify that Nones can be inserted and overwrites existing data + # create the input + params = [] + for datatype in PRIMITIVE_DATATYPES: + params.append((get_sample(datatype))) + + # insert the data + columns_string = ','.join(col_names) + placeholders = ','.join(["%s"] * len(col_names)) + simple_insert = "INSERT INTO alltypes (zz, {0}) VALUES (5, {1})".format(columns_string, placeholders) + s.execute(simple_insert, params) # then insert None, which should null them out - null_values = [None] * len(self._col_names) - s.execute(insert, null_values) + null_values = [None] * len(col_names) + s.execute(simple_insert, null_values) - select = "SELECT %s FROM alltypes WHERE key=5" % columns_string - results = s.execute(select) - self.assertEqual([], [(name, val) for (name, val) in results[0].items() if val is not None]) + # check via simple statement + query = "SELECT {0} FROM alltypes WHERE zz=5".format(columns_string) + results = s.execute(query)[0] + for col in results: + self.assertEqual(None, col) - prepared = s.prepare(select) - results = s.execute(prepared.bind(())) - self.assertEqual([], [(name, val) for (name, val) in results[0].items() if val is not None]) + # check via prepared statement + select = s.prepare("SELECT {0} FROM alltypes WHERE zz=?".format(columns_string)) + results = s.execute(select.bind([5]))[0] + for col in results: + self.assertEqual(None, col) # do the same thing again, but use a prepared statement to insert the nulls - s.execute(insert, values) + s.execute(simple_insert, params) - placeholders = ','.join(["?"] * len(self._col_names)) - prepared = s.prepare("INSERT INTO alltypes (key, %s) 
VALUES (5, %s)" % (columns_string, placeholders)) - s.execute(prepared, null_values) + placeholders = ','.join(["?"] * len(col_names)) + insert = s.prepare("INSERT INTO alltypes (zz, {0}) VALUES (5, {1})".format(columns_string, placeholders)) + s.execute(insert, null_values) - results = s.execute(select) - self.assertEqual([], [(name, val) for (name, val) in results[0].items() if val is not None]) + results = s.execute(query)[0] + for col in results: + self.assertEqual(None, col) - prepared = s.prepare(select) - results = s.execute(prepared.bind(())) - self.assertEqual([], [(name, val) for (name, val) in results[0].items() if val is not None]) + results = s.execute(select.bind([5]))[0] + for col in results: + self.assertEqual(None, col) s.shutdown() - def test_empty_values(self): - s = self._session + def test_can_insert_empty_values_for_int32(self): + """ + Ensure Int32Type supports empty values + """ + + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("typetests") + s.execute("CREATE TABLE empty_values (a text PRIMARY KEY, b int)") s.execute("INSERT INTO empty_values (a, b) VALUES ('a', blobAsInt(0x))") try: @@ -364,8 +365,13 @@ def test_empty_values(self): finally: Int32Type.support_empty_values = False - def test_timezone_aware_datetimes(self): - """ Ensure timezone-aware datetimes are converted to timestamps correctly """ + c.shutdown() + + def test_timezone_aware_datetimes_are_timestamps(self): + """ + Ensure timezone-aware datetimes are converted to timestamps correctly + """ + try: import pytz except ImportError as exc: @@ -375,22 +381,25 @@ def test_timezone_aware_datetimes(self): eastern_tz = pytz.timezone('US/Eastern') eastern_tz.localize(dt) - s = self._session + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("typetests") s.execute("CREATE TABLE tz_aware (a ascii PRIMARY KEY, b timestamp)") # test non-prepared statement - s.execute("INSERT INTO tz_aware (a, b) VALUES ('key1', %s)", parameters=(dt,)) + s.execute("INSERT 
INTO tz_aware (a, b) VALUES ('key1', %s)", [dt]) result = s.execute("SELECT b FROM tz_aware WHERE a='key1'")[0].b self.assertEqual(dt.utctimetuple(), result.utctimetuple()) # test prepared statement - prepared = s.prepare("INSERT INTO tz_aware (a, b) VALUES ('key2', ?)") - s.execute(prepared, parameters=(dt,)) + insert = s.prepare("INSERT INTO tz_aware (a, b) VALUES ('key2', ?)") + s.execute(insert.bind([dt])) result = s.execute("SELECT b FROM tz_aware WHERE a='key2'")[0].b self.assertEqual(dt.utctimetuple(), result.utctimetuple()) - def test_tuple_type(self): + c.shutdown() + + def test_can_insert_tuples(self): """ Basic test of tuple functionality """ @@ -398,8 +407,8 @@ def test_tuple_type(self): if self._cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - s = self._session.cluster.connect() - s.set_keyspace(self._session.keyspace) + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("typetests") # use this encoder in order to insert tuples s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple @@ -439,9 +448,9 @@ def test_tuple_type(self): self.assertEqual(partial_result, s.execute(prepared, (4,))[0].b) self.assertEqual(subpartial_result, s.execute(prepared, (5,))[0].b) - s.shutdown() + c.shutdown() - def test_tuple_type_varying_lengths(self): + def test_can_insert_tuples_with_varying_lengths(self): """ Test tuple types of lengths of 1, 2, 3, and 384 to ensure edge cases work as expected. 
@@ -450,8 +459,8 @@ def test_tuple_type_varying_lengths(self): if self._cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - s = self._session.cluster.connect() - s.set_keyspace(self._session.keyspace) + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("typetests") # set the row_factory to dict_factory for programmatic access # set the encoder for tuples for the ability to write tuples @@ -479,9 +488,9 @@ def test_tuple_type_varying_lengths(self): result = s.execute("SELECT v_%s FROM tuple_lengths WHERE k=0", (i,))[0] self.assertEqual(tuple(created_tuple), result['v_%s' % i]) - s.shutdown() + c.shutdown() - def test_tuple_primitive_subtypes(self): + def test_can_insert_tuples_all_primitive_datatypes(self): """ Ensure tuple subtypes are appropriately handled. """ @@ -489,28 +498,28 @@ def test_tuple_primitive_subtypes(self): if self._cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - s = self._session.cluster.connect() - s.set_keyspace(self._session.keyspace) + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("typetests") s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple s.execute("CREATE TABLE tuple_primitive (" "k int PRIMARY KEY, " - "v frozen>)" % ','.join(DATA_TYPE_PRIMITIVES)) + "v frozen>)" % ','.join(PRIMITIVE_DATATYPES)) - for i in range(len(DATA_TYPE_PRIMITIVES)): + for i in range(len(PRIMITIVE_DATATYPES)): # create tuples to be written and ensure they match with the expected response # responses have trailing None values for every element that has not been written - created_tuple = [get_sample(DATA_TYPE_PRIMITIVES[j]) for j in range(i + 1)] - response_tuple = tuple(created_tuple + [None for j in range(len(DATA_TYPE_PRIMITIVES) - i - 1)]) + created_tuple = [get_sample(PRIMITIVE_DATATYPES[j]) for j in range(i + 1)] + response_tuple = tuple(created_tuple + [None for j in range(len(PRIMITIVE_DATATYPES) - i - 1)]) written_tuple = 
tuple(created_tuple) s.execute("INSERT INTO tuple_primitive (k, v) VALUES (%s, %s)", (i, written_tuple)) result = s.execute("SELECT v FROM tuple_primitive WHERE k=%s", (i,))[0] self.assertEqual(response_tuple, result.v) - s.shutdown() + c.shutdown() - def test_tuple_non_primitive_subtypes(self): + def test_can_insert_tuples_all_collection_datatypes(self): """ Ensure tuple subtypes are appropriately handled for maps, sets, and lists. """ @@ -518,8 +527,8 @@ def test_tuple_non_primitive_subtypes(self): if self._cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - s = self._session.cluster.connect() - s.set_keyspace(self._session.keyspace) + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("typetests") # set the row_factory to dict_factory for programmatic access # set the encoder for tuples for the ability to write tuples @@ -529,15 +538,15 @@ def test_tuple_non_primitive_subtypes(self): values = [] # create list values - for datatype in DATA_TYPE_PRIMITIVES: + for datatype in PRIMITIVE_DATATYPES: values.append('v_{} frozen>>'.format(len(values), datatype)) # create set values - for datatype in DATA_TYPE_PRIMITIVES: + for datatype in PRIMITIVE_DATATYPES: values.append('v_{} frozen>>'.format(len(values), datatype)) # create map values - for datatype in DATA_TYPE_PRIMITIVES: + for datatype in PRIMITIVE_DATATYPES: datatype_1 = datatype_2 = datatype if datatype == 'blob': # unhashable type: 'bytearray' @@ -545,9 +554,9 @@ def test_tuple_non_primitive_subtypes(self): values.append('v_{} frozen>>'.format(len(values), datatype_1, datatype_2)) # make sure we're testing all non primitive data types in the future - if set(DATA_TYPE_NON_PRIMITIVE_NAMES) != set(['tuple', 'list', 'map', 'set']): + if set(COLLECTION_TYPES) != set(['tuple', 'list', 'map', 'set']): raise NotImplemented('Missing datatype not implemented: {}'.format( - set(DATA_TYPE_NON_PRIMITIVE_NAMES) - set(['tuple', 'list', 'map', 'set']) + 
set(COLLECTION_TYPES) - set(['tuple', 'list', 'map', 'set']) )) # create table @@ -557,7 +566,7 @@ def test_tuple_non_primitive_subtypes(self): i = 0 # test tuple> - for datatype in DATA_TYPE_PRIMITIVES: + for datatype in PRIMITIVE_DATATYPES: created_tuple = tuple([[get_sample(datatype)]]) s.execute("INSERT INTO tuple_non_primative (k, v_%s) VALUES (0, %s)", (i, created_tuple)) @@ -566,7 +575,7 @@ def test_tuple_non_primitive_subtypes(self): i += 1 # test tuple> - for datatype in DATA_TYPE_PRIMITIVES: + for datatype in PRIMITIVE_DATATYPES: created_tuple = tuple([sortedset([get_sample(datatype)])]) s.execute("INSERT INTO tuple_non_primative (k, v_%s) VALUES (0, %s)", (i, created_tuple)) @@ -575,7 +584,7 @@ def test_tuple_non_primitive_subtypes(self): i += 1 # test tuple> - for datatype in DATA_TYPE_PRIMITIVES: + for datatype in PRIMITIVE_DATATYPES: if datatype == 'blob': # unhashable type: 'bytearray' created_tuple = tuple([{get_sample('ascii'): get_sample(datatype)}]) @@ -587,7 +596,7 @@ def test_tuple_non_primitive_subtypes(self): result = s.execute("SELECT v_%s FROM tuple_non_primative WHERE k=0", (i,))[0] self.assertEqual(created_tuple, result['v_%s' % i]) i += 1 - s.shutdown() + c.shutdown() def nested_tuples_schema_helper(self, depth): """ @@ -609,7 +618,7 @@ def nested_tuples_creator_helper(self, depth): else: return (self.nested_tuples_creator_helper(depth - 1), ) - def test_nested_tuples(self): + def test_can_insert_nested_tuples(self): """ Ensure nested are appropriately handled. 
""" @@ -617,8 +626,8 @@ def test_nested_tuples(self): if self._cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - s = self._session.cluster.connect() - s.set_keyspace(self._session.keyspace) + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("typetests") # set the row_factory to dict_factory for programmatic access # set the encoder for tuples for the ability to write tuples @@ -647,16 +656,18 @@ def test_nested_tuples(self): # verify tuple was written and read correctly result = s.execute("SELECT v_%s FROM nested_tuples WHERE k=%s", (i, i))[0] self.assertEqual(created_tuple, result['v_%s' % i]) - s.shutdown() + c.shutdown() - def test_tuples_with_nulls(self): + def test_can_insert_tuples_with_nulls(self): """ Test tuples with null and empty string fields. """ + if self._cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - s = self._session + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("typetests") s.execute("CREATE TABLE tuples_nulls (k int PRIMARY KEY, t frozen>)") @@ -675,75 +686,29 @@ def test_tuples_with_nulls(self): self.assertEqual(('', None, None, b''), result[0].t) self.assertEqual(('', None, None, b''), s.execute(read)[0].t) - def test_unicode_query_string(self): - s = self._session + c.shutdown() - query = u"SELECT * FROM system.schema_columnfamilies WHERE keyspace_name = 'ef\u2052ef' AND columnfamily_name = %s" - s.execute(query, (u"fe\u2051fe",)) - - def insert_select_column(self, session, table_name, column_name, value): - insert = session.prepare("INSERT INTO %s (k, %s) VALUES (?, ?)" % (table_name, column_name)) - session.execute(insert, (0, value)) - result = session.execute("SELECT %s FROM %s WHERE k=%%s" % (column_name, table_name), (0,))[0][0] - self.assertEqual(result, value) - - def test_nested_collections(self): + def test_can_insert_unicode_query_string(self): + """ + Test to ensure unicode strings can be used 
in a query + """ - if self._cass_version < (2, 1, 3): - raise unittest.SkipTest("Support for nested collections was introduced in Cassandra 2.1.3") + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("typetests") - if PROTOCOL_VERSION < 3: - raise unittest.SkipTest("Protocol version > 3 required for nested collections") + query = u"SELECT * FROM system.schema_columnfamilies WHERE keyspace_name = 'ef\u2052ef' AND columnfamily_name = %s" + s.execute(query, (u"fe\u2051fe",)) - name = self._testMethodName + c.shutdown() - s = self._session.cluster.connect() - s.set_keyspace(self._session.keyspace) - s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple + def test_can_read_composite_type(self): + """ + Test to ensure that CompositeTypes can be used in a query + """ - s.execute(""" - CREATE TYPE %s ( - m frozen>, - t tuple, - l frozen>, - s frozen> - )""" % name) - s.execute(""" - CREATE TYPE %s_nested ( - m frozen>, - t tuple, - l frozen>, - s frozen>, - u frozen<%s> - )""" % (name, name)) - s.execute(""" - CREATE TABLE %s ( - k int PRIMARY KEY, - map_map map>, frozen>>, - map_set map>, frozen>>, - map_list map>, frozen>>, - map_tuple map>, frozen>>, - map_udt map, frozen<%s>>, - )""" % (name, name, name)) - - validate = partial(self.insert_select_column, s, name) - validate('map_map', OrderedMap([({1: 1, 2: 2}, {3: 3, 4: 4}), ({5: 5, 6: 6}, {7: 7, 8: 8})])) - validate('map_set', OrderedMap([(set((1, 2)), set((3, 4))), (set((5, 6)), set((7, 8)))])) - validate('map_list', OrderedMap([([1, 2], [3, 4]), ([5, 6], [7, 8])])) - validate('map_tuple', OrderedMap([((1, 2), (3,)), ((4, 5), (6,))])) - - value = nested_collection_udt({1: 'v1', 2: 'v2'}, (3, 'v3'), [4, 5, 6, 7], set((8, 9, 10))) - key = nested_collection_udt_nested(value.m, value.t, value.l, value.s, value) - key2 = nested_collection_udt_nested({3: 'v3'}, value.t, value.l, value.s, value) - validate('map_udt', OrderedMap([(key, value), (key2, value)])) - - s.execute("DROP TABLE %s" % (name)) - 
s.execute("DROP TYPE %s_nested" % (name)) - s.execute("DROP TYPE %s" % (name)) - s.shutdown() + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("typetests") - def test_reading_composite_type(self): - s = self._session s.execute(""" CREATE TABLE composites ( a int PRIMARY KEY, @@ -761,3 +726,5 @@ def test_reading_composite_type(self): result = s.execute("SELECT * FROM composites WHERE a = 0")[0] self.assertEqual(0, result.a) self.assertEqual(('abc',), result.b) + + c.shutdown() From e2dcc133bdf658b35e3a3ed922b886fc76952358 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Tue, 7 Apr 2015 17:02:53 -0700 Subject: [PATCH 1315/3726] Refactored UDTTests --- tests/integration/standard/test_udts.py | 472 ++++++++++++------------ 1 file changed, 238 insertions(+), 234 deletions(-) diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index acdb42a183..10b9dd390c 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -11,51 +11,68 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from cassandra.query import dict_factory try: import unittest2 as unittest except ImportError: - import unittest # noqa + import unittest # noqa import logging log = logging.getLogger(__name__) from collections import namedtuple +from functools import partial +from cassandra import InvalidRequest from cassandra.cluster import Cluster, UserTypeDoesNotExist +from cassandra.query import dict_factory +from cassandra.util import OrderedMap from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION -from tests.integration.datatype_utils import get_sample, get_nonprim_sample,\ - DATA_TYPE_PRIMITIVES, DATA_TYPE_NON_PRIMITIVE_NAMES +from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, \ + get_sample, get_collection_sample + +nested_collection_udt = namedtuple('nested_collection_udt', ['m', 't', 'l', 's']) +nested_collection_udt_nested = namedtuple('nested_collection_udt_nested', ['m', 't', 'l', 's', 'u']) def setup_module(): use_singledc() + update_datatypes() -class TypeTests(unittest.TestCase): +class UDTTests(unittest.TestCase): def setUp(self): - if PROTOCOL_VERSION < 3: - raise unittest.SkipTest("v3 protocol is required for UDT tests") - self._cass_version, self._cql_version = get_server_versions() - def test_unprepared_registered_udts(self): + if self._cass_version < (2, 1, 0): + raise unittest.SkipTest("User Defined Types were introduced in Cassandra 2.1") + + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + self.session.execute("CREATE KEYSPACE udttests WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}") + self.cluster.shutdown() + + def tearDown(self): + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + self.session.execute("DROP KEYSPACE udttests") + self.cluster.shutdown() + + def test_can_insert_unprepared_registered_udts(self): + """ + Test the insertion of 
unprepared, registered UDTs + """ + c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() + s = c.connect("udttests") - s.execute(""" - CREATE KEYSPACE udt_test_unprepared_registered - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' } - """) - s.set_keyspace("udt_test_unprepared_registered") s.execute("CREATE TYPE user (age int, name text)") s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen)") User = namedtuple('user', ('age', 'name')) - c.register_user_type("udt_test_unprepared_registered", "user", User) + c.register_user_type("udttests", "user", User) s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User(42, 'bob'))) result = s.execute("SELECT b FROM mytable WHERE a=0") @@ -87,9 +104,10 @@ def test_unprepared_registered_udts(self): c.shutdown() - def test_register_before_connecting(self): - User1 = namedtuple('user', ('age', 'name')) - User2 = namedtuple('user', ('state', 'is_cool')) + def test_can_register_udt_before_connecting(self): + """ + Test the registration of UDTs before session creation + """ c = Cluster(protocol_version=PROTOCOL_VERSION) s = c.connect() @@ -113,6 +131,10 @@ def test_register_before_connecting(self): # now that types are defined, shutdown and re-create Cluster c.shutdown() c = Cluster(protocol_version=PROTOCOL_VERSION) + + User1 = namedtuple('user', ('age', 'name')) + User2 = namedtuple('user', ('state', 'is_cool')) + c.register_user_type("udt_test_register_before_connecting", "user", User1) c.register_user_type("udt_test_register_before_connecting2", "user", User2) @@ -139,15 +161,14 @@ def test_register_before_connecting(self): c.shutdown() - def test_prepared_unregistered_udts(self): + def test_can_insert_prepared_unregistered_udts(self): + """ + Test the insertion of prepared, unregistered UDTs + """ + c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() + s = c.connect("udttests") - s.execute(""" - CREATE KEYSPACE udt_test_prepared_unregistered - WITH 
replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' } - """) - s.set_keyspace("udt_test_prepared_unregistered") s.execute("CREATE TYPE user (age int, name text)") s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen)") @@ -184,18 +205,17 @@ def test_prepared_unregistered_udts(self): c.shutdown() - def test_prepared_registered_udts(self): + def test_can_insert_prepared_registered_udts(self): + """ + Test the insertion of prepared, registered UDTs + """ + c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() + s = c.connect("udttests") - s.execute(""" - CREATE KEYSPACE udt_test_prepared_registered - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' } - """) - s.set_keyspace("udt_test_prepared_registered") s.execute("CREATE TYPE user (age int, name text)") User = namedtuple('user', ('age', 'name')) - c.register_user_type("udt_test_prepared_registered", "user", User) + c.register_user_type("udttests", "user", User) s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen)") @@ -235,21 +255,17 @@ def test_prepared_registered_udts(self): c.shutdown() - def test_udts_with_nulls(self): + def test_can_insert_udts_with_nulls(self): """ - Test UDTs with null and empty string fields. 
+ Test the insertion of UDTs with null and empty string fields """ + c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() + s = c.connect("udttests") - s.execute(""" - CREATE KEYSPACE test_udts_with_nulls - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' } - """) - s.set_keyspace("test_udts_with_nulls") s.execute("CREATE TYPE user (a text, b int, c uuid, d blob)") User = namedtuple('user', ('a', 'b', 'c', 'd')) - c.register_user_type("test_udts_with_nulls", "user", User) + c.register_user_type("udttests", "user", User) s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen)") @@ -270,38 +286,30 @@ def test_udts_with_nulls(self): c.shutdown() - def test_udt_sizes(self): + def test_can_insert_udts_with_varying_lengths(self): """ - Test for ensuring extra-lengthy udts are handled correctly. + Test for ensuring extra-lengthy udts are properly inserted """ - if self._cass_version < (2, 1, 0): - raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - - MAX_TEST_LENGTH = 16384 - EXTENDED_QUERY_TIMEOUT = 60 - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() + s = c.connect("udttests") - s.execute("""CREATE KEYSPACE test_udt_sizes - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}""") - s.set_keyspace("test_udt_sizes") + MAX_TEST_LENGTH = 1024 # create the seed udt, increase timeout to avoid the query failure on slow systems s.execute("CREATE TYPE lengthy_udt ({})" .format(', '.join(['v_{} int'.format(i) - for i in range(MAX_TEST_LENGTH)])), timeout=EXTENDED_QUERY_TIMEOUT) + for i in range(MAX_TEST_LENGTH)]))) # create a table with multiple sizes of nested udts # no need for all nested types, only a spot checked few and the largest one s.execute("CREATE TABLE mytable (" "k int PRIMARY KEY, " - "v frozen)", timeout=EXTENDED_QUERY_TIMEOUT) + "v frozen)") # create and register the seed udt type udt = namedtuple('lengthy_udt', tuple(['v_{}'.format(i) for i in 
range(MAX_TEST_LENGTH)])) - c.register_user_type("test_udt_sizes", "lengthy_udt", udt) + c.register_user_type("udttests", "lengthy_udt", udt) # verify inserts and reads for i in (0, 1, 2, 3, MAX_TEST_LENGTH): @@ -313,245 +321,189 @@ def test_udt_sizes(self): s.execute("INSERT INTO mytable (k, v) VALUES (0, %s)", (created_udt,)) # verify udt was written and read correctly, increase timeout to avoid the query failure on slow systems - result = s.execute("SELECT v FROM mytable WHERE k=0", timeout=EXTENDED_QUERY_TIMEOUT)[0] + result = s.execute("SELECT v FROM mytable WHERE k=0")[0] self.assertEqual(created_udt, result.v) c.shutdown() - def nested_udt_helper(self, udts, i): - """ - Helper for creating nested udts. - """ + def nested_udt_schema_helper(self, session, MAX_NESTING_DEPTH): + # create the seed udt + session.execute("CREATE TYPE depth_0 (age int, name text)") + + # create the nested udts + for i in range(MAX_NESTING_DEPTH): + session.execute("CREATE TYPE depth_{} (value frozen)".format(i + 1, i)) + # create a table with multiple sizes of nested udts + # no need for all nested types, only a spot checked few and the largest one + session.execute("CREATE TABLE mytable (" + "k int PRIMARY KEY, " + "v_0 frozen, " + "v_1 frozen, " + "v_2 frozen, " + "v_3 frozen, " + "v_{0} frozen)".format(MAX_NESTING_DEPTH)) + + def nested_udt_creation_helper(self, udts, i): if i == 0: return udts[0](42, 'Bob') else: - return udts[i](self.nested_udt_helper(udts, i - 1)) - - def test_nested_registered_udts(self): - """ - Test for ensuring nested udts are handled correctly. 
- """ + return udts[i](self.nested_udt_creation_helper(udts, i - 1)) - if self._cass_version < (2, 1, 0): - raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") + def nested_udt_verification_helper(self, session, MAX_NESTING_DEPTH, udts): + for i in (0, 1, 2, 3, MAX_NESTING_DEPTH): + # create udt + udt = self.nested_udt_creation_helper(udts, i) - MAX_NESTING_DEPTH = 16 + # write udt via simple statement + session.execute("INSERT INTO mytable (k, v_%s) VALUES (0, %s)", [i, udt]) - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() + # verify udt was written and read correctly + result = session.execute("SELECT v_{0} FROM mytable WHERE k=0".format(i))[0] + self.assertEqual(udt, result["v_{0}".format(i)]) - # set the row_factory to dict_factory for programmatically accessing values - s.row_factory = dict_factory + # write udt via prepared statement + insert = session.prepare("INSERT INTO mytable (k, v_{0}) VALUES (1, ?)".format(i)) + session.execute(insert, [udt]) - s.execute("""CREATE KEYSPACE test_nested_registered_udts - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}""") - s.set_keyspace("test_nested_registered_udts") + # verify udt was written and read correctly + result = session.execute("SELECT v_{0} FROM mytable WHERE k=1".format(i))[0] + self.assertEqual(udt, result["v_{0}".format(i)]) - # create the seed udt - s.execute("CREATE TYPE depth_0 (age int, name text)") + def test_can_insert_nested_registered_udts(self): + """ + Test for ensuring nested registered udts are properly inserted + """ - # create the nested udts - for i in range(MAX_NESTING_DEPTH): - s.execute("CREATE TYPE depth_{} (value frozen)".format(i + 1, i)) + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("udttests") + s.row_factory = dict_factory - # create a table with multiple sizes of nested udts - # no need for all nested types, only a spot checked few and the largest one - s.execute("CREATE TABLE mytable (" - "k 
int PRIMARY KEY, " - "v_0 frozen, " - "v_1 frozen, " - "v_2 frozen, " - "v_3 frozen, " - "v_{0} frozen)".format(MAX_NESTING_DEPTH)) + MAX_NESTING_DEPTH = 16 - # create the udt container - udts = [] + # create the schema + self.nested_udt_schema_helper(s, MAX_NESTING_DEPTH) # create and register the seed udt type + udts = [] udt = namedtuple('depth_0', ('age', 'name')) udts.append(udt) - c.register_user_type("test_nested_registered_udts", "depth_0", udts[0]) + c.register_user_type("udttests", "depth_0", udts[0]) # create and register the nested udt types for i in range(MAX_NESTING_DEPTH): udt = namedtuple('depth_{}'.format(i + 1), ('value')) udts.append(udt) - c.register_user_type("test_nested_registered_udts", "depth_{}".format(i + 1), udts[i + 1]) + c.register_user_type("udttests", "depth_{}".format(i + 1), udts[i + 1]) - # verify inserts and reads - for i in (0, 1, 2, 3, MAX_NESTING_DEPTH): - # create udt - udt = self.nested_udt_helper(udts, i) - - # write udt - s.execute("INSERT INTO mytable (k, v_%s) VALUES (0, %s)", (i, udt)) - - # verify udt was written and read correctly - result = s.execute("SELECT v_%s FROM mytable WHERE k=0", (i,))[0] - self.assertEqual(udt, result['v_%s' % i]) + # insert udts and verify inserts with reads + self.nested_udt_verification_helper(s, MAX_NESTING_DEPTH, udts) c.shutdown() - def test_nested_unregistered_udts(self): + def test_can_insert_nested_unregistered_udts(self): """ - Test for ensuring nested unregistered udts are handled correctly. 
+ Test for ensuring nested unregistered udts are properly inserted """ - if self._cass_version < (2, 1, 0): - raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - - MAX_NESTING_DEPTH = 16 c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() - - # set the row_factory to dict_factory for programmatically accessing values + s = c.connect("udttests") s.row_factory = dict_factory - s.execute("""CREATE KEYSPACE test_nested_unregistered_udts - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}""") - s.set_keyspace("test_nested_unregistered_udts") - - # create the seed udt - s.execute("CREATE TYPE depth_0 (age int, name text)") - - # create the nested udts - for i in range(MAX_NESTING_DEPTH): - s.execute("CREATE TYPE depth_{} (value frozen)".format(i + 1, i)) + MAX_NESTING_DEPTH = 16 - # create a table with multiple sizes of nested udts - # no need for all nested types, only a spot checked few and the largest one - s.execute("CREATE TABLE mytable (" - "k int PRIMARY KEY, " - "v_0 frozen, " - "v_1 frozen, " - "v_2 frozen, " - "v_3 frozen, " - "v_{0} frozen)".format(MAX_NESTING_DEPTH)) + # create the schema + self.nested_udt_schema_helper(s, MAX_NESTING_DEPTH) - # create the udt container + # create the seed udt type udts = [] - - # create and register the seed udt type udt = namedtuple('depth_0', ('age', 'name')) udts.append(udt) - # create and register the nested udt types + # create the nested udt types for i in range(MAX_NESTING_DEPTH): udt = namedtuple('depth_{}'.format(i + 1), ('value')) udts.append(udt) - # verify inserts and reads + # insert udts via prepared statements and verify inserts with reads for i in (0, 1, 2, 3, MAX_NESTING_DEPTH): # create udt - udt = self.nested_udt_helper(udts, i) + udt = self.nested_udt_creation_helper(udts, i) # write udt insert = s.prepare("INSERT INTO mytable (k, v_{0}) VALUES (0, ?)".format(i)) - s.execute(insert, (udt,)) + s.execute(insert, [udt]) # verify udt was written 
and read correctly - result = s.execute("SELECT v_%s FROM mytable WHERE k=0", (i,))[0] - self.assertEqual(udt, result['v_%s' % i]) + result = s.execute("SELECT v_{0} FROM mytable WHERE k=0".format(i))[0] + self.assertEqual(udt, result["v_{0}".format(i)]) c.shutdown() - def test_nested_registered_udts_with_different_namedtuples(self): + def test_can_insert_nested_registered_udts_with_different_namedtuples(self): """ - Test for ensuring nested udts are handled correctly when the + Test for ensuring nested udts are inserted correctly when the created namedtuples are use names that are different the cql type. - - Future improvement: optimize these three related tests using a single - helper method to cut down on code repetition. """ - if self._cass_version < (2, 1, 0): - raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - - MAX_NESTING_DEPTH = 16 - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() - - # set the row_factory to dict_factory for programmatically accessing values + s = c.connect("udttests") s.row_factory = dict_factory - s.execute("""CREATE KEYSPACE different_namedtuples - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}""") - s.set_keyspace("different_namedtuples") - - # create the seed udt - s.execute("CREATE TYPE depth_0 (age int, name text)") - - # create the nested udts - for i in range(MAX_NESTING_DEPTH): - s.execute("CREATE TYPE depth_{} (value frozen)".format(i + 1, i)) - - # create a table with multiple sizes of nested udts - # no need for all nested types, only a spot checked few and the largest one - s.execute("CREATE TABLE mytable (" - "k int PRIMARY KEY, " - "v_0 frozen, " - "v_1 frozen, " - "v_2 frozen, " - "v_3 frozen, " - "v_{0} frozen)".format(MAX_NESTING_DEPTH)) + MAX_NESTING_DEPTH = 16 - # create the udt container - udts = [] + # create the schema + self.nested_udt_schema_helper(s, MAX_NESTING_DEPTH) # create and register the seed udt type + udts = [] udt = 
namedtuple('level_0', ('age', 'name')) udts.append(udt) - c.register_user_type("different_namedtuples", "depth_0", udts[0]) + c.register_user_type("udttests", "depth_0", udts[0]) # create and register the nested udt types for i in range(MAX_NESTING_DEPTH): udt = namedtuple('level_{}'.format(i + 1), ('value')) udts.append(udt) - c.register_user_type("different_namedtuples", "depth_{}".format(i + 1), udts[i + 1]) - - # verify inserts and reads - for i in (0, 1, 2, 3, MAX_NESTING_DEPTH): - # create udt - udt = self.nested_udt_helper(udts, i) + c.register_user_type("udttests", "depth_{}".format(i + 1), udts[i + 1]) - # write udt - s.execute("INSERT INTO mytable (k, v_%s) VALUES (0, %s)", (i, udt)) - - # verify udt was written and read correctly - result = s.execute("SELECT v_%s FROM mytable WHERE k=0", (i,))[0] - self.assertEqual(udt, result['v_%s' % i]) + # insert udts and verify inserts with reads + self.nested_udt_verification_helper(s, MAX_NESTING_DEPTH, udts) c.shutdown() - def test_non_existing_types(self): + def test_raise_error_on_nonexisting_udts(self): + """ + Test for ensuring that an error is raised for operating on a nonexisting udt or an invalid keyspace + """ + c = Cluster(protocol_version=PROTOCOL_VERSION) - c.connect() + s = c.connect("udttests") User = namedtuple('user', ('age', 'name')) - self.assertRaises(UserTypeDoesNotExist, c.register_user_type, "some_bad_keyspace", "user", User) - self.assertRaises(UserTypeDoesNotExist, c.register_user_type, "system", "user", User) + + with self.assertRaises(UserTypeDoesNotExist): + c.register_user_type("some_bad_keyspace", "user", User) + + with self.assertRaises(UserTypeDoesNotExist): + c.register_user_type("system", "user", User) + + with self.assertRaises(InvalidRequest): + s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen)") c.shutdown() - def test_primitive_datatypes(self): + def test_can_insert_udt_all_datatypes(self): """ - Test for inserting various types of DATA_TYPE_PRIMITIVES into UDT's + 
Test for inserting various types of PRIMITIVE_DATATYPES into UDT's """ - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() - # create keyspace - s.execute(""" - CREATE KEYSPACE test_primitive_datatypes - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' } - """) - s.set_keyspace("test_primitive_datatypes") + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("udttests") # create UDT alpha_type_list = [] start_index = ord('a') - for i, datatype in enumerate(DATA_TYPE_PRIMITIVES): + for i, datatype in enumerate(PRIMITIVE_DATATYPES): alpha_type_list.append("{0} {1}".format(chr(start_index + i), datatype)) s.execute(""" @@ -563,14 +515,14 @@ def test_primitive_datatypes(self): # register UDT alphabet_list = [] - for i in range(ord('a'), ord('a') + len(DATA_TYPE_PRIMITIVES)): + for i in range(ord('a'), ord('a') + len(PRIMITIVE_DATATYPES)): alphabet_list.append('{}'.format(chr(i))) Alldatatypes = namedtuple("alldatatypes", alphabet_list) - c.register_user_type("test_primitive_datatypes", "alldatatypes", Alldatatypes) + c.register_user_type("udttests", "alldatatypes", Alldatatypes) # insert UDT data params = [] - for datatype in DATA_TYPE_PRIMITIVES: + for datatype in PRIMITIVE_DATATYPES: params.append((get_sample(datatype))) insert = s.prepare("INSERT INTO mytable (a, b) VALUES (?, ?)") @@ -586,34 +538,28 @@ def test_primitive_datatypes(self): c.shutdown() - def test_nonprimitive_datatypes(self): + def test_can_insert_udt_all_collection_datatypes(self): """ - Test for inserting various types of DATA_TYPE_NON_PRIMITIVE into UDT's + Test for inserting various types of COLLECTION_TYPES into UDT's """ - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect() - # create keyspace - s.execute(""" - CREATE KEYSPACE test_nonprimitive_datatypes - WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' } - """) - s.set_keyspace("test_nonprimitive_datatypes") + c = 
Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("udttests") # create UDT alpha_type_list = [] start_index = ord('a') - for i, nonprim_datatype in enumerate(DATA_TYPE_NON_PRIMITIVE_NAMES): - for j, datatype in enumerate(DATA_TYPE_PRIMITIVES): - if nonprim_datatype == "map": + for i, collection_type in enumerate(COLLECTION_TYPES): + for j, datatype in enumerate(PRIMITIVE_DATATYPES): + if collection_type == "map": type_string = "{0}_{1} {2}<{3}, {3}>".format(chr(start_index + i), chr(start_index + j), - nonprim_datatype, datatype) - elif nonprim_datatype == "tuple": + collection_type, datatype) + elif collection_type == "tuple": type_string = "{0}_{1} frozen<{2}<{3}>>".format(chr(start_index + i), chr(start_index + j), - nonprim_datatype, datatype) + collection_type, datatype) else: type_string = "{0}_{1} {2}<{3}>".format(chr(start_index + i), chr(start_index + j), - nonprim_datatype, datatype) + collection_type, datatype) alpha_type_list.append(type_string) s.execute(""" @@ -625,18 +571,18 @@ def test_nonprimitive_datatypes(self): # register UDT alphabet_list = [] - for i in range(ord('a'), ord('a') + len(DATA_TYPE_NON_PRIMITIVE_NAMES)): - for j in range(ord('a'), ord('a') + len(DATA_TYPE_PRIMITIVES)): + for i in range(ord('a'), ord('a') + len(COLLECTION_TYPES)): + for j in range(ord('a'), ord('a') + len(PRIMITIVE_DATATYPES)): alphabet_list.append('{0}_{1}'.format(chr(i), chr(j))) Alldatatypes = namedtuple("alldatatypes", alphabet_list) - c.register_user_type("test_nonprimitive_datatypes", "alldatatypes", Alldatatypes) + c.register_user_type("udttests", "alldatatypes", Alldatatypes) # insert UDT data params = [] - for nonprim_datatype in DATA_TYPE_NON_PRIMITIVE_NAMES: - for datatype in DATA_TYPE_PRIMITIVES: - params.append((get_nonprim_sample(nonprim_datatype, datatype))) + for collection_type in COLLECTION_TYPES: + for datatype in PRIMITIVE_DATATYPES: + params.append((get_collection_sample(collection_type, datatype))) insert = s.prepare("INSERT INTO 
mytable (a, b) VALUES (?, ?)") s.execute(insert, (0, Alldatatypes(*params))) @@ -650,3 +596,61 @@ def test_nonprimitive_datatypes(self): self.assertEqual(expected, actual) c.shutdown() + + def insert_select_column(self, session, table_name, column_name, value): + insert = session.prepare("INSERT INTO %s (k, %s) VALUES (?, ?)" % (table_name, column_name)) + session.execute(insert, (0, value)) + result = session.execute("SELECT %s FROM %s WHERE k=%%s" % (column_name, table_name), (0,))[0][0] + self.assertEqual(result, value) + + def test_can_insert_nested_collections(self): + """ + Test for inserting various types of nested COLLECTION_TYPES into tables and UDTs + """ + + if self._cass_version < (2, 1, 3): + raise unittest.SkipTest("Support for nested collections was introduced in Cassandra 2.1.3") + + c = Cluster(protocol_version=PROTOCOL_VERSION) + s = c.connect("udttests") + s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple + + name = self._testMethodName + + s.execute(""" + CREATE TYPE %s ( + m frozen>, + t tuple, + l frozen>, + s frozen> + )""" % name) + s.execute(""" + CREATE TYPE %s_nested ( + m frozen>, + t tuple, + l frozen>, + s frozen>, + u frozen<%s> + )""" % (name, name)) + s.execute(""" + CREATE TABLE %s ( + k int PRIMARY KEY, + map_map map>, frozen>>, + map_set map>, frozen>>, + map_list map>, frozen>>, + map_tuple map>, frozen>>, + map_udt map, frozen<%s>>, + )""" % (name, name, name)) + + validate = partial(self.insert_select_column, s, name) + validate('map_map', OrderedMap([({1: 1, 2: 2}, {3: 3, 4: 4}), ({5: 5, 6: 6}, {7: 7, 8: 8})])) + validate('map_set', OrderedMap([(set((1, 2)), set((3, 4))), (set((5, 6)), set((7, 8)))])) + validate('map_list', OrderedMap([([1, 2], [3, 4]), ([5, 6], [7, 8])])) + validate('map_tuple', OrderedMap([((1, 2), (3,)), ((4, 5), (6,))])) + + value = nested_collection_udt({1: 'v1', 2: 'v2'}, (3, 'v3'), [4, 5, 6, 7], set((8, 9, 10))) + key = nested_collection_udt_nested(value.m, value.t, value.l, value.s, value) + key2 
= nested_collection_udt_nested({3: 'v3'}, value.t, value.l, value.s, value) + validate('map_udt', OrderedMap([(key, value), (key2, value)])) + + c.shutdown() \ No newline at end of file From e89a23a91b0c5de6b84c75884aaf61b65f68d6c9 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Tue, 7 Apr 2015 17:20:09 -0700 Subject: [PATCH 1316/3726] add pure-sasl as a test-requirement for SaslAuthenticatorTests --- test-requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/test-requirements.txt b/test-requirements.txt index e4d2d3836b..8fc8dad04a 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -8,3 +8,4 @@ unittest2 PyYAML pytz sure +pure-sasl From 79c89ed67c60aed3c6e2ff67fd8469eb93873fc6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 8 Apr 2015 11:26:33 -0500 Subject: [PATCH 1317/3726] Add C* 3.0/v4 to integration test default protocol --- tests/integration/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index ce3f042bf6..3ea397f01e 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -118,7 +118,9 @@ def _tuple_version(version_string): log.info('Using Cassandra version: %s', CASSANDRA_VERSION) CCM_KWARGS['version'] = CASSANDRA_VERSION -if CASSANDRA_VERSION > '2.1': +if CASSANDRA_VERSION > '3.0': + default_protocol_version = 4 +elif CASSANDRA_VERSION > '2.1': default_protocol_version = 3 elif CASSANDRA_VERSION > '2.0': default_protocol_version = 2 From 37677993faee7ec4c31da651cf67b13a7b34db03 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 8 Apr 2015 12:42:51 -0500 Subject: [PATCH 1318/3726] Add a test for prepared statements with no column meta --- .../standard/test_prepared_statements.py | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index a75e6bc3d2..9240996ce8 
100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -213,6 +213,31 @@ def test_none_values(self): cluster.shutdown() + def test_no_meta(self): + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + session = cluster.connect() + + prepared = session.prepare( + """ + INSERT INTO test3rf.test (k, v) VALUES (0, 0) + """) + + self.assertIsInstance(prepared, PreparedStatement) + bound = prepared.bind(None) + session.execute(bound) + + prepared = session.prepare( + """ + SELECT * FROM test3rf.test WHERE k=0 + """) + self.assertIsInstance(prepared, PreparedStatement) + + bound = prepared.bind(None) + results = session.execute(bound) + self.assertEqual(results[0].v, 0) + + cluster.shutdown() + def test_none_values_dicts(self): """ Ensure binding None is handled correctly with dict bindings From 5dfc8b688d22d6fd8c309aee8e253947f3f667db Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 8 Apr 2015 15:30:49 -0500 Subject: [PATCH 1319/3726] Change R/W failure exception base to CoordinationFailure --- cassandra/__init__.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index c1e0bdd807..eacc29e09a 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -221,7 +221,7 @@ def __init__(self, message, write_type=None, **kwargs): self.write_type = write_type -class Failure(Exception): +class CoordinationFailure(Exception): """ Replicas sent a failure to the coordinator. """ @@ -252,12 +252,12 @@ def __init__(self, summary_message, consistency=None, required_responses=None, r repr({'consistency': consistency_value_to_name(consistency), 'required_responses': required_responses, 'received_responses': received_responses, - 'failures' : failures})) + 'failures': failures})) -class ReadFailure(Failure): +class ReadFailure(CoordinationFailure): """ - A subclass of :exc:`Failure` for read operations. 
+ A subclass of :exc:`CoordinationFailure` for read operations. This indicates that the replicas sent a failure message to the coordinator. """ @@ -270,13 +270,13 @@ class ReadFailure(Failure): """ def __init__(self, message, data_retrieved=None, **kwargs): - Failure.__init__(self, message, **kwargs) + CoordinationFailure.__init__(self, message, **kwargs) self.data_retrieved = data_retrieved -class WriteFailure(Failure): +class WriteFailure(CoordinationFailure): """ - A subclass of :exc:`Failure` for write operations. + A subclass of :exc:`CoordinationFailure` for write operations. This indicates that the replicas sent a failure message to the coordinator. """ @@ -287,7 +287,7 @@ class WriteFailure(Failure): """ def __init__(self, message, write_type=None, **kwargs): - Failure.__init__(self, message, **kwargs) + CoordinationFailure.__init__(self, message, **kwargs) self.write_type = write_type From 275a2663aa9e466b517729324d32765d17eae1bd Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 8 Apr 2015 15:31:40 -0500 Subject: [PATCH 1320/3726] Add FunctionFailure error message and exception --- cassandra/__init__.py | 27 +++++++++++++++++++++++++++ cassandra/protocol.py | 19 ++++++++++++++++++- 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index eacc29e09a..a9c7e21ad6 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -291,6 +291,33 @@ def __init__(self, message, write_type=None, **kwargs): self.write_type = write_type +class FunctionFailure(Exception): + """ + User Defined Function failed during execution + """ + + keyspace = None + """ + Keyspace of the function + """ + + function = None + """ + Name of the function + """ + + arg_types = None + """ + Argument types of the function + """ + + def __init__(self, summary_message, keyspace, function, arg_types): + self.keyspace = keyspace + self.function = function + self.arg_types = arg_types + Exception.__init__(self, summary_message) + + 
class AlreadyExists(Exception): """ An attempt was made to create a keyspace or table that already exists. diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 527aa262b2..bdf7ae2b81 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -22,7 +22,7 @@ import io from cassandra import (Unavailable, WriteTimeout, ReadTimeout, - WriteFailure, ReadFailure, + WriteFailure, ReadFailure, FunctionFailure, AlreadyExists, InvalidRequest, Unauthorized, UnsupportedOperation) from cassandra.marshal import (int32_pack, int32_unpack, uint16_pack, uint16_unpack, @@ -282,6 +282,22 @@ def to_exception(self): return ReadFailure(self.summary_msg(), **self.info) +class FunctionFailureMessage(RequestExecutionException): + summary = "User Defined Function failure" + error_code = 0x1400 + + @staticmethod + def recv_error_info(f): + return { + 'keyspace': read_string(f), + 'function': read_string(f), + 'arg_types': [read_string(f) for _ in range(read_short(f))], + } + + def to_exception(self): + return FunctionFailure(self.summary_msg(), **self.info) + + class WriteFailureMessage(RequestExecutionException): summary = "Replica(s) failed to execute write" error_code = 0x1500 @@ -299,6 +315,7 @@ def recv_error_info(f): def to_exception(self): return WriteFailure(self.summary_msg(), **self.info) + class SyntaxException(RequestValidationException): summary = 'Syntax error in CQL query' error_code = 0x2000 From d5626750b184047a9f2f50465fd6edbb3a1b77cc Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 8 Apr 2015 16:47:56 -0500 Subject: [PATCH 1321/3726] Add new request exceptions to docs --- cassandra/__init__.py | 2 +- docs/api/cassandra.rst | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index a9c7e21ad6..fe57b2f90e 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -308,7 +308,7 @@ class FunctionFailure(Exception): arg_types = None """ - Argument types of the function + List of 
argument type names of the function """ def __init__(self, summary_message, keyspace, function, arg_types): diff --git a/docs/api/cassandra.rst b/docs/api/cassandra.rst index 90d23d108e..fd0de0be0e 100644 --- a/docs/api/cassandra.rst +++ b/docs/api/cassandra.rst @@ -26,6 +26,15 @@ .. autoexception:: WriteTimeout() :members: +.. autoexception:: ReadFailure() + :members: + +.. autoexception:: WriteFailure() + :members: + +.. autoexception:: FunctionFailure() + :members: + .. autoexception:: AlreadyExists() :members: From 4136a42601ce453de51862ac9368b84e05b95c98 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Apr 2015 11:58:52 -0500 Subject: [PATCH 1322/3726] Populate QueryTrace.client for C* 3.0+ --- cassandra/query.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cassandra/query.py b/cassandra/query.py index fc71b8f62b..cbe021fe4e 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -761,6 +761,13 @@ class QueryTrace(object): A :class:`datetime.timedelta` measure of the duration of the query. """ + client = None + """ + The IP address of the client that issued this request + + This is only available when using Cassandra 3.0+ + """ + coordinator = None """ The IP address of the host that acted as coordinator for this request. 
@@ -829,6 +836,8 @@ def populate(self, max_wait=2.0): self.started_at = session_row.started_at self.coordinator = session_row.coordinator self.parameters = session_row.parameters + # since C* 3.0 + self.client = getattr(session_row, 'client', None) log.debug("Attempting to fetch trace events for trace ID: %s", self.trace_id) time_spent = time.time() - start From c381fa0f04dbeb9ac389fa017bac8d9cc2ded0be Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Apr 2015 12:06:18 -0500 Subject: [PATCH 1323/3726] Make query use time uuid function from util previous entry point deprecated --- cassandra/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/query.py b/cassandra/query.py index e973ed2b2e..809ac77c05 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -26,7 +26,7 @@ import six from cassandra import ConsistencyLevel, OperationTimedOut -from cassandra.cqltypes import unix_time_from_uuid1 +from cassandra.util import unix_time_from_uuid1 from cassandra.encoder import Encoder import cassandra.encoder from cassandra.util import OrderedDict From 96d4a485ada62247796857467cbdf7db6735d9a0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Apr 2015 16:40:53 -0500 Subject: [PATCH 1324/3726] Retrive UDF metadata --- cassandra/cluster.py | 15 +++++- cassandra/metadata.py | 121 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 132 insertions(+), 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cb04152a5b..e14c882c4a 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1820,6 +1820,7 @@ class ControlConnection(object): _SELECT_COLUMN_FAMILIES = "SELECT * FROM system.schema_columnfamilies" _SELECT_COLUMNS = "SELECT * FROM system.schema_columns" _SELECT_USERTYPES = "SELECT * FROM system.schema_usertypes" + _SELECT_FUNCTIONS = "SELECT * FROM system.schema_functions" _SELECT_TRIGGERS = "SELECT * FROM system.schema_triggers" _SELECT_PEERS = "SELECT peer, data_center, rack, tokens, 
rpc_address, schema_version FROM system.peers" @@ -2088,12 +2089,14 @@ def _handle_results(success, result): QueryMessage(query=self._SELECT_COLUMN_FAMILIES, consistency_level=cl), QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl), QueryMessage(query=self._SELECT_USERTYPES, consistency_level=cl), + QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl), QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl) ] responses = connection.wait_for_responses(*queries, timeout=self._timeout, fail_on_error=False) (ks_success, ks_result), (cf_success, cf_result), \ (col_success, col_result), (types_success, types_result), \ + (functions_success, functions_result), \ (trigger_success, triggers_result) = responses if ks_success: @@ -2135,8 +2138,18 @@ def _handle_results(success, result): else: raise types_result + # functions were introduced in Cassandra 3.0 + if functions_success: + functions_result = dict_factory(*functions_result.results) if functions_result.results else {} + else: + if isinstance(functions_result, InvalidRequest): + log.debug("[control connection] user functions table not found") + functions_result = {} + else: + raise functions_result + log.debug("[control connection] Fetched schema, rebuilding metadata") - self._cluster.metadata.rebuild_schema(ks_result, types_result, cf_result, col_result, triggers_result) + self._cluster.metadata.rebuild_schema(ks_result, types_result, functions_result, cf_result, col_result, triggers_result) return True def refresh_node_list_and_token_map(self, force_token_rebuild=False): diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 1f88d8f9e6..896b848b5a 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -19,9 +19,10 @@ import json import logging import re +import six +from six.moves import zip from threading import RLock import weakref -import six murmur3 = None try: @@ -88,7 +89,8 @@ def export_schema_as_string(self): """ return "\n".join(ks.export_as_string() for 
ks in self.keyspaces.values()) - def rebuild_schema(self, ks_results, type_results, cf_results, col_results, triggers_result): + def rebuild_schema(self, ks_results, type_results, function_results, + cf_results, col_results, triggers_result): """ Rebuild the view of the current schema from a fresh set of rows from the system schema tables. @@ -98,6 +100,7 @@ def rebuild_schema(self, ks_results, type_results, cf_results, col_results, trig cf_def_rows = defaultdict(list) col_def_rows = defaultdict(lambda: defaultdict(list)) usertype_rows = defaultdict(list) + fn_rows = defaultdict(list) trigger_rows = defaultdict(lambda: defaultdict(list)) for row in cf_results: @@ -111,6 +114,9 @@ def rebuild_schema(self, ks_results, type_results, cf_results, col_results, trig for row in type_results: usertype_rows[row["keyspace_name"]].append(row) + for row in function_results: + fn_rows[row["keyspace_name"]].append(row) + for row in triggers_result: ksname = row["keyspace_name"] cfname = row["columnfamily_name"] @@ -131,6 +137,10 @@ def rebuild_schema(self, ks_results, type_results, cf_results, col_results, trig usertype = self._build_usertype(keyspace_meta.name, usertype_row) keyspace_meta.user_types[usertype.name] = usertype + for fn_row in fn_rows.get(keyspace_meta.name, []): + fn = self._build_function(keyspace_meta.name, fn_row) + keyspace_meta.functions[fn.name] = fn + current_keyspaces.add(keyspace_meta.name) old_keyspace_meta = self.keyspaces.get(keyspace_meta.name, None) self.keyspaces[keyspace_meta.name] = keyspace_meta @@ -140,7 +150,7 @@ def rebuild_schema(self, ks_results, type_results, cf_results, col_results, trig self._keyspace_added(keyspace_meta.name) # remove not-just-added keyspaces - removed_keyspaces = [ksname for ksname in self.keyspaces.keys() + removed_keyspaces = [name for name in self.keyspaces.keys() if ksname not in current_keyspaces] self.keyspaces = dict((name, meta) for name, meta in self.keyspaces.items() if name in current_keyspaces) @@ -215,6 
+225,14 @@ def _build_usertype(self, keyspace, usertype_row): return UserType(usertype_row['keyspace_name'], usertype_row['type_name'], usertype_row['field_names'], type_classes) + def _build_function(self, keyspace, function_row): + return_type = types.lookup_casstype(function_row['return_type']) + return Function(function_row['keyspace_name'], function_row['function_name'], + function_row['signature'], function_row['argument_names'], + return_type, function_row['language'], function_row['body'], + function_row['is_deterministic'], function_row.get('called_on_null_input')) + # called_on_null_input is not yet merged + def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): cfname = row["columnfamily_name"] cf_col_rows = col_rows.get(cfname, []) @@ -740,12 +758,19 @@ class KeyspaceMetadata(object): .. versionadded:: 2.1.0 """ + functions = None + """ + A map from user-defined function names to instances of :class:`~cassandra.metadata..Function`. + + .. versionadded:: 3.0.0 + """ def __init__(self, name, durable_writes, strategy_class, strategy_options): self.name = name self.durable_writes = durable_writes self.replication_strategy = ReplicationStrategy.create(strategy_class, strategy_options) self.tables = {} self.user_types = {} + self.functions = {} def export_as_string(self): """ @@ -843,6 +868,96 @@ def as_cql_query(self, formatted=False): return ret +class Function(object): + """ + A user defined function, as created by ``CREATE FUNCTION`` statements. + + User-defined functions were introduced in Cassandra 3.0 + + .. 
versionadded:: 3.0.0 + """ + + keyspace = None + """ + The string name of the keyspace in which this function is defined + """ + + name = None + """ + The name of this function + """ + + signature = None + """ + An ordered list of the types for each argument to the function + """ + + arguemnt_names = None + """ + An ordered list of the names of each argument to the function + """ + + return_type = None + """ + Return type of the function + """ + + language = None + """ + Language of the function body + """ + + body = None + """ + Function body string + """ + + is_deterministic = None + """ + Flag indicating whether this function is deterministic + (required for functional indexes) + """ + + called_on_null_input = None + """ + Flag indicating whether this function should be called for rows with null values + (convenience function to avoid handling nulls explicitly if the result will just be null) + """ + + def __init__(self, keyspace, name, signature, argument_names, + return_type, language, body, is_deterministic, called_on_null_input): + self.keyspace = keyspace + self.name = name + self.signature = signature + self.argument_names = argument_names + self.return_type = return_type + self.language = language + self.body = body + self.is_deterministic = is_deterministic + self.called_on_null_input = called_on_null_input + + def as_cql_query(self, formatted=False): + """ + Returns a CQL query that can be used to recreate this function. + If `formatted` is set to :const:`True`, extra whitespace will + be added to make the query more readable. 
+ """ + sep = '\n' if formatted else ' ' + keyspace = protect_name(self.keyspace) + name = protect_name(self.name) + arg_list = ', '.join(["%s %s" % (protect_name(n), t) + for n, t in zip(self.argument_names, self.signature)]) + determ = '' if self.is_deterministic else 'NON DETERMINISTIC ' + typ = self.return_type.cql_parameterized_type() + lang = self.language + body = protect_value(self.body) + + return "CREATE %(determ)sFUNCTION %(keyspace)s.%(name)s(%(arg_list)s)%(sep)s" \ + "RETURNS %(typ)s%(sep)s" \ + "LANGUAGE %(lang)s%(sep)s" \ + "AS %(body)s;" % locals() + + class TableMetadata(object): """ A representation of the schema for a single table. From 6b509718039635be9b8d3f4b3da76b4d540bda15 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 10 Apr 2015 14:20:54 -0500 Subject: [PATCH 1325/3726] Handle function schema_change events; basic tests Possibly still some server-side issues to iron out --- cassandra/cluster.py | 34 +++++---- cassandra/metadata.py | 9 +++ cassandra/protocol.py | 4 +- tests/integration/standard/test_metadata.py | 80 +++++++++++++++++++++ 4 files changed, 113 insertions(+), 14 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index e14c882c4a..96eb4044c9 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1116,7 +1116,7 @@ def _ensure_core_connections(self): for pool in session._pools.values(): pool.ensure_core_connections() - def refresh_schema(self, keyspace=None, table=None, usertype=None, max_schema_agreement_wait=None): + def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None, max_schema_agreement_wait=None): """ Synchronously refresh the schema metadata. @@ -1129,7 +1129,7 @@ def refresh_schema(self, keyspace=None, table=None, usertype=None, max_schema_ag An Exception is raised if schema refresh fails for any reason. 
""" - if not self.control_connection.refresh_schema(keyspace, table, usertype, max_schema_agreement_wait): + if not self.control_connection.refresh_schema(keyspace, table, usertype, function, max_schema_agreement_wait): raise Exception("Schema was not refreshed. See log for details.") def submit_schema_refresh(self, keyspace=None, table=None, usertype=None): @@ -2008,7 +2008,7 @@ def shutdown(self): self._connection.close() del self._connection - def refresh_schema(self, keyspace=None, table=None, usertype=None, + def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None, schema_agreement_wait=None): if not self._meta_refresh_enabled: log.debug("[control connection] Skipping schema refresh because meta refresh is disabled") @@ -2016,7 +2016,7 @@ def refresh_schema(self, keyspace=None, table=None, usertype=None, try: if self._connection: - return self._refresh_schema(self._connection, keyspace, table, usertype, + return self._refresh_schema(self._connection, keyspace, table, usertype, function, schema_agreement_wait=schema_agreement_wait) except ReferenceError: pass # our weak reference to the Cluster is no good @@ -2025,7 +2025,7 @@ def refresh_schema(self, keyspace=None, table=None, usertype=None, self._signal_error() return False - def _refresh_schema(self, connection, keyspace=None, table=None, usertype=None, + def _refresh_schema(self, connection, keyspace=None, table=None, usertype=None, function=None, preloaded_results=None, schema_agreement_wait=None): if self._cluster.is_shutdown: return False @@ -2074,6 +2074,14 @@ def _handle_results(success, result): log.debug("[control connection] Fetched user type info for %s.%s, rebuilding metadata", keyspace, usertype) types_result = dict_factory(*types_result.results) if types_result.results else {} self._cluster.metadata.usertype_changed(keyspace, usertype, types_result) + elif function: + # user defined function within this keyspace changed + where_clause = " WHERE keyspace_name = '%s' AND 
function_name = '%s'" % (keyspace, function) + functions_query = QueryMessage(query=self._SELECT_FUNCTIONS + where_clause, consistency_level=cl) + functions_result = connection.wait_for_response(functions_query) + log.debug("[control connection] Fetched user function info for %s.%s, rebuilding metadata", keyspace, function) + functions_result = dict_factory(*functions_result.results) if functions_result.results else {} + self._cluster.metadata.function_changed(keyspace, function, functions_result) elif keyspace: # only the keyspace itself changed (such as replication settings) where_clause = " WHERE keyspace_name = '%s'" % (keyspace,) @@ -2297,8 +2305,9 @@ def _handle_schema_change(self, event): keyspace = event.get('keyspace') table = event.get('table') usertype = event.get('type') + function = event.get('function', event.get('aggregate')) delay = random() * self._schema_event_refresh_window - self._cluster.scheduler.schedule_unique(delay, self.refresh_schema, keyspace, table, usertype) + self._cluster.scheduler.schedule_unique(delay, self.refresh_schema, keyspace, table, usertype, function) def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wait_time=None): @@ -2527,19 +2536,19 @@ def _log_if_failed(self, future): exc_info=exc) -def refresh_schema_and_set_result(keyspace, table, usertype, control_conn, response_future): +def refresh_schema_and_set_result(keyspace, table, usertype, function, control_conn, response_future): try: if control_conn._meta_refresh_enabled: - log.debug("Refreshing schema in response to schema change. Keyspace: %s; Table: %s, Type: %s", - keyspace, table, usertype) - control_conn._refresh_schema(response_future._connection, keyspace, table, usertype) + log.debug("Refreshing schema in response to schema change. 
Keyspace: %s; Table: %s, Type: %s, Function: %s", + keyspace, table, usertype, function) + control_conn._refresh_schema(response_future._connection, keyspace, table, usertype, function) else: log.debug("Skipping schema refresh in response to schema change because meta refresh is disabled; " - "Keyspace: %s; Table: %s, Type: %s", keyspace, table, usertype) + "Keyspace: %s; Table: %s, Type: %s, Function: %s", keyspace, table, usertype, function) except Exception: log.exception("Exception refreshing schema in response to schema change:") response_future.session.submit( - control_conn.refresh_schema, keyspace, table, usertype) + control_conn.refresh_schema, keyspace, table, usertype, function) finally: response_future._set_final_result(None) @@ -2724,6 +2733,7 @@ def _set_result(self, response): response.results['keyspace'], response.results.get('table'), response.results.get('type'), + response.results.get('function', response.results.get('aggregate')), self.session.cluster.control_connection, self) else: diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 896b848b5a..d6cd8e5840 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -170,6 +170,7 @@ def keyspace_changed(self, keyspace, ks_results): if old_keyspace_meta: keyspace_meta.tables = old_keyspace_meta.tables keyspace_meta.user_types = old_keyspace_meta.user_types + keyspace_meta.functions = old_keyspace_meta.functions if (keyspace_meta.replication_strategy != old_keyspace_meta.replication_strategy): self._keyspace_updated(keyspace) else: @@ -183,6 +184,14 @@ def usertype_changed(self, keyspace, name, type_results): # the type was deleted self.keyspaces[keyspace].user_types.pop(name, None) + def function_changed(self, keyspace, name, function_results): + if function_results: + new_function = self._build_function(keyspace, function_results[0]) + self.keyspaces[keyspace].functions[name] = new_function + else: + # the function was deleted + self.keyspaces[keyspace].functions.pop(name, 
None) + def table_changed(self, keyspace, table, cf_results, col_results, triggers_result): try: keyspace_meta = self.keyspaces[keyspace] diff --git a/cassandra/protocol.py b/cassandra/protocol.py index d81a650731..922925a526 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -851,8 +851,8 @@ def recv_schema_change(cls, f, protocol_version): target = read_string(f) keyspace = read_string(f) if target != "KEYSPACE": - table_or_type = read_string(f) - return {'change_type': change_type, 'keyspace': keyspace, target.lower(): table_or_type} + target_name = read_string(f) + return {'change_type': change_type, 'keyspace': keyspace, target.lower(): target_name} else: return {'change_type': change_type, 'keyspace': keyspace} else: diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index e5b2eab3ce..367b9ee7fe 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -928,3 +928,83 @@ def test_keyspace_alter(self): new_keyspace_meta = self.cluster.metadata.keyspaces[name] self.assertNotEqual(original_keyspace_meta, new_keyspace_meta) self.assertEqual(new_keyspace_meta.durable_writes, False) + +from cassandra.cqltypes import DoubleType +from cassandra.metadata import Function + + +class FunctionMetadata(unittest.TestCase): + + keyspace_name = "functionmetadatatest" + + @property + def function_name(self): + return self._testMethodName.lower() + + @classmethod + def setup_class(cls): + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest("Function metadata requires native protocol version 4+") + + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.session = cls.cluster.connect() + cls.session.execute("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name) + cls.keyspace_function_meta = cls.cluster.metadata.keyspaces[cls.keyspace_name].functions + + @classmethod + def teardown_class(cls): + 
cls.session.execute("DROP KEYSPACE %s" % cls.keyspace_name) + cls.cluster.shutdown() + + def make_function_kwargs(self, deterministic=True, called_on_null=True): + return {'keyspace': self.keyspace_name, + 'name': self.function_name, + 'signature': ['double', 'int'], + 'argument_names': ['d', 'i'], + 'return_type': DoubleType, + 'language': 'java', + 'body': 'return new Double(0.0);', + 'is_deterministic': deterministic, + 'called_on_null_input': called_on_null} + + def test_create_drop_function(self): + self.assertNotIn(self.function_name, self.keyspace_function_meta) + + expected_meta = Function(**self.make_function_kwargs()) + self.session.execute(expected_meta.as_cql_query()) + self.assertIn(self.function_name, self.keyspace_function_meta) + + generated_meta = self.keyspace_function_meta[self.function_name] + self.assertEqual(generated_meta.as_cql_query(), expected_meta.as_cql_query()) + + self.session.execute("DROP FUNCTION %s.%s" % (self.keyspace_name, self.function_name)) + self.assertNotIn(self.function_name, self.keyspace_function_meta) + + # TODO: this presently fails because C* c059a56 requires udt to be frozen to create, but does not store meta indicating frozen + @unittest.expectedFailure + def test_functions_after_udt(self): + self.assertNotIn(self.function_name, self.keyspace_function_meta) + + udt_name = 'udtx' + self.session.execute("CREATE TYPE %s.%s (x int)" % (self.keyspace_name, udt_name)) + + # make a function that takes a udt type + kwargs = self.make_function_kwargs() + kwargs['signature'][0] = "frozen<%s>" % udt_name + + expected_meta = Function(**kwargs) + self.session.execute(expected_meta.as_cql_query()) + self.assertIn(self.function_name, self.keyspace_function_meta) + + generated_meta = self.keyspace_function_meta[self.function_name] + self.assertEqual(generated_meta.as_cql_query(), expected_meta.as_cql_query()) + + # udts must come before functions in keyspace dump + keyspace_cql = 
self.cluster.metadata.keyspaces[self.keyspace_name].export_as_string() + type_idx = keyspace_cql.rfind("CREATE TYPE") + func_idx = keyspace_cql.find("CREATE FUCNTION") + self.assertNotIn(-1, (type_idx, func_idx)) + self.assertGreater(func_idx, type_idx) + + self.session.execute("DROP FUNCTION %s.%s" % (self.keyspace_name, self.function_name)) + self.assertNotIn(self.function_name, self.keyspace_function_meta) From 1e420158a9dd6124f449205a3fe9424b86076288 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 10 Apr 2015 16:19:24 -0500 Subject: [PATCH 1326/3726] improve and expond function metadata tests --- tests/integration/standard/test_metadata.py | 100 ++++++++++++++------ 1 file changed, 70 insertions(+), 30 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 367b9ee7fe..b8df0738e7 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -956,6 +956,27 @@ def teardown_class(cls): cls.session.execute("DROP KEYSPACE %s" % cls.keyspace_name) cls.cluster.shutdown() + class VerifiedFunction(object): + def __init__(self, test_case, **function_kwargs): + self.test_case = test_case + self.function_kwargs = function_kwargs + + def __enter__(self): + tc = self.test_case + expected_meta = Function(**self.function_kwargs) + tc.assertNotIn(expected_meta.name, tc.keyspace_function_meta) + tc.session.execute(expected_meta.as_cql_query()) + tc.assertIn(expected_meta.name, tc.keyspace_function_meta) + + generated_meta = tc.keyspace_function_meta[expected_meta.name] + self.test_case.assertEqual(generated_meta.as_cql_query(), expected_meta.as_cql_query()) + + def __exit__(self, exc_type, exc_val, exc_tb): + tc = self.test_case + function_name = self.function_kwargs['name'] + tc.session.execute("DROP FUNCTION %s.%s" % (tc.keyspace_name, function_name)) + tc.assertNotIn(function_name, tc.keyspace_function_meta) + def make_function_kwargs(self, 
deterministic=True, called_on_null=True): return {'keyspace': self.keyspace_name, 'name': self.function_name, @@ -968,43 +989,62 @@ def make_function_kwargs(self, deterministic=True, called_on_null=True): 'called_on_null_input': called_on_null} def test_create_drop_function(self): - self.assertNotIn(self.function_name, self.keyspace_function_meta) - - expected_meta = Function(**self.make_function_kwargs()) - self.session.execute(expected_meta.as_cql_query()) - self.assertIn(self.function_name, self.keyspace_function_meta) - - generated_meta = self.keyspace_function_meta[self.function_name] - self.assertEqual(generated_meta.as_cql_query(), expected_meta.as_cql_query()) - - self.session.execute("DROP FUNCTION %s.%s" % (self.keyspace_name, self.function_name)) - self.assertNotIn(self.function_name, self.keyspace_function_meta) + with self.VerifiedFunction(self, **self.make_function_kwargs()): + pass - # TODO: this presently fails because C* c059a56 requires udt to be frozen to create, but does not store meta indicating frozen - @unittest.expectedFailure def test_functions_after_udt(self): self.assertNotIn(self.function_name, self.keyspace_function_meta) udt_name = 'udtx' self.session.execute("CREATE TYPE %s.%s (x int)" % (self.keyspace_name, udt_name)) - # make a function that takes a udt type + # Ideally we would make a function that takes a udt type, but + # this presently fails because C* c059a56 requires udt to be frozen to create, but does not store meta indicating frozen + # Maybe update this after release + #kwargs = self.make_function_kwargs() + #kwargs['signature'][0] = "frozen<%s>" % udt_name + + #expected_meta = Function(**kwargs) + #with self.VerifiedFunction(self, **kwargs): + with self.VerifiedFunction(self, **self.make_function_kwargs()): + # udts must come before functions in keyspace dump + keyspace_cql = self.cluster.metadata.keyspaces[self.keyspace_name].export_as_string() + type_idx = keyspace_cql.rfind("CREATE TYPE") + func_idx = 
keyspace_cql.find("CREATE FUNCTION") + self.assertNotIn(-1, (type_idx, func_idx), "TYPE or FUNCTION not found in keyspace_cql: " + keyspace_cql) + self.assertGreater(func_idx, type_idx) + + def test_functions_follow_keyspace_alter(self): + with self.VerifiedFunction(self, **self.make_function_kwargs()): + original_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name] + self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name) + try: + new_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name] + self.assertNotEqual(original_keyspace_meta, new_keyspace_meta) + self.assertIs(original_keyspace_meta.functions, new_keyspace_meta.functions) + finally: + self.session.execute('ALTER KEYSPACE %s WITH durable_writes = true' % self.keyspace_name) + + def test_function_cql_determinism(self): kwargs = self.make_function_kwargs() - kwargs['signature'][0] = "frozen<%s>" % udt_name - - expected_meta = Function(**kwargs) - self.session.execute(expected_meta.as_cql_query()) - self.assertIn(self.function_name, self.keyspace_function_meta) - - generated_meta = self.keyspace_function_meta[self.function_name] - self.assertEqual(generated_meta.as_cql_query(), expected_meta.as_cql_query()) + kwargs['is_deterministic'] = True + with self.VerifiedFunction(self, **kwargs): + fn_meta = self.keyspace_function_meta[self.function_name] + self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*") - # udts must come before functions in keyspace dump - keyspace_cql = self.cluster.metadata.keyspaces[self.keyspace_name].export_as_string() - type_idx = keyspace_cql.rfind("CREATE TYPE") - func_idx = keyspace_cql.find("CREATE FUCNTION") - self.assertNotIn(-1, (type_idx, func_idx)) - self.assertGreater(func_idx, type_idx) + kwargs['is_deterministic'] = False + with self.VerifiedFunction(self, **kwargs): + fn_meta = self.keyspace_function_meta[self.function_name] + self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE NON 
DETERMINISTIC FUNCTION.*") - self.session.execute("DROP FUNCTION %s.%s" % (self.keyspace_name, self.function_name)) - self.assertNotIn(self.function_name, self.keyspace_function_meta) + def test_function_cql_called_on_null(self): + kwargs = self.make_function_kwargs() + kwargs['called_on_null_input'] = True + with self.VerifiedFunction(self, **kwargs): + fn_meta = self.keyspace_function_meta[self.function_name] + self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) CALLED ON NULL INPUT RETURNS .*") + + kwargs['called_on_null_input'] = False + with self.VerifiedFunction(self, **kwargs): + fn_meta = self.keyspace_function_meta[self.function_name] + self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) NOT CALLED ON NULL INPUT RETURNS .*") From fd9df7bd58f68b6f05dd85d1d0fcad090890af75 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 10 Apr 2015 16:20:59 -0500 Subject: [PATCH 1327/3726] commit to called_on_null_input schema for CASSANDRA-8374 --- cassandra/metadata.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index d6cd8e5840..abb2d60a62 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -239,8 +239,7 @@ def _build_function(self, keyspace, function_row): return Function(function_row['keyspace_name'], function_row['function_name'], function_row['signature'], function_row['argument_names'], return_type, function_row['language'], function_row['body'], - function_row['is_deterministic'], function_row.get('called_on_null_input')) - # called_on_null_input is not yet merged + function_row['is_deterministic'], function_row['called_on_null_input']) def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): cfname = row["columnfamily_name"] @@ -786,7 +785,10 @@ def export_as_string(self): Returns a CQL query string that can be used to recreate the entire keyspace, including user-defined types and tables. 
""" - return "\n\n".join([self.as_cql_query()] + self.user_type_strings() + [t.export_as_string() for t in self.tables.values()]) + return "\n\n".join([self.as_cql_query()] + + self.user_type_strings() + + [f.as_cql_query(True) for f in self.functions.values()] + + [t.export_as_string() for t in self.tables.values()]) def as_cql_query(self): """ @@ -960,8 +962,10 @@ def as_cql_query(self, formatted=False): typ = self.return_type.cql_parameterized_type() lang = self.language body = protect_value(self.body) + on_null = "CALLED" if self.called_on_null_input else "RETURNS NULL" return "CREATE %(determ)sFUNCTION %(keyspace)s.%(name)s(%(arg_list)s)%(sep)s" \ + "%(on_null)s ON NULL INPUT%(sep)s" \ "RETURNS %(typ)s%(sep)s" \ "LANGUAGE %(lang)s%(sep)s" \ "AS %(body)s;" % locals() From 527c6c05bca436c6f84651a333872e5cf5ea5c8d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 13 Apr 2015 14:19:37 -0500 Subject: [PATCH 1328/3726] Handle overloaded user functions with differing type signatures --- cassandra/__init__.py | 25 +++++++++++++++++++++++++ cassandra/cluster.py | 9 +++++---- cassandra/metadata.py | 21 +++++++++++++-------- cassandra/protocol.py | 13 ++++++++----- 4 files changed, 51 insertions(+), 17 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 296601cbf1..d1ad0afe0d 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -302,3 +302,28 @@ class UnsupportedOperation(Exception): for more details. 
""" pass + + +class UserFunctionDescriptor(object): + """ + Describes a User function or aggregate by name and argument signature + """ + + name = None + + type_signature = None + """ + Ordered list of CQL argument types + """ + + def __init__(self, name, type_signature): + self.name = name + self.type_signature = type_signature + + @property + def signature(self): + return self.format_signature(self.name, self.type_signature) + + @staticmethod + def format_signature(name, type_signature): + return "%s(%s)" % (name, ','.join(t for t in type_signature)) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 96eb4044c9..e1f5ec6f6c 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2076,10 +2076,11 @@ def _handle_results(success, result): self._cluster.metadata.usertype_changed(keyspace, usertype, types_result) elif function: # user defined function within this keyspace changed - where_clause = " WHERE keyspace_name = '%s' AND function_name = '%s'" % (keyspace, function) + where_clause = " WHERE keyspace_name = '%s' AND function_name = '%s' AND signature = [%s]" \ + % (keyspace, function.name, ','.join("'%s'" % t for t in function.type_signature)) functions_query = QueryMessage(query=self._SELECT_FUNCTIONS + where_clause, consistency_level=cl) functions_result = connection.wait_for_response(functions_query) - log.debug("[control connection] Fetched user function info for %s.%s, rebuilding metadata", keyspace, function) + log.debug("[control connection] Fetched user function info for %s.%s, rebuilding metadata", keyspace, function.signature) functions_result = dict_factory(*functions_result.results) if functions_result.results else {} self._cluster.metadata.function_changed(keyspace, function, functions_result) elif keyspace: @@ -2305,7 +2306,7 @@ def _handle_schema_change(self, event): keyspace = event.get('keyspace') table = event.get('table') usertype = event.get('type') - function = event.get('function', event.get('aggregate')) + function = 
event.get('function') delay = random() * self._schema_event_refresh_window self._cluster.scheduler.schedule_unique(delay, self.refresh_schema, keyspace, table, usertype, function) @@ -2733,7 +2734,7 @@ def _set_result(self, response): response.results['keyspace'], response.results.get('table'), response.results.get('type'), - response.results.get('function', response.results.get('aggregate')), + response.results.get('function'), self.session.cluster.control_connection, self) else: diff --git a/cassandra/metadata.py b/cassandra/metadata.py index abb2d60a62..32edd01f9c 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -30,6 +30,7 @@ except ImportError as e: pass +from cassandra import UserFunctionDescriptor import cassandra.cqltypes as types from cassandra.marshal import varint_unpack from cassandra.pool import Host @@ -139,7 +140,7 @@ def rebuild_schema(self, ks_results, type_results, function_results, for fn_row in fn_rows.get(keyspace_meta.name, []): fn = self._build_function(keyspace_meta.name, fn_row) - keyspace_meta.functions[fn.name] = fn + keyspace_meta.functions[fn.signature] = fn current_keyspaces.add(keyspace_meta.name) old_keyspace_meta = self.keyspaces.get(keyspace_meta.name, None) @@ -184,13 +185,13 @@ def usertype_changed(self, keyspace, name, type_results): # the type was deleted self.keyspaces[keyspace].user_types.pop(name, None) - def function_changed(self, keyspace, name, function_results): + def function_changed(self, keyspace, function, function_results): if function_results: new_function = self._build_function(keyspace, function_results[0]) - self.keyspaces[keyspace].functions[name] = new_function + self.keyspaces[keyspace].functions[function.signature] = new_function else: # the function was deleted - self.keyspaces[keyspace].functions.pop(name, None) + self.keyspaces[keyspace].functions.pop(function.signature, None) def table_changed(self, keyspace, table, cf_results, col_results, triggers_result): try: @@ -898,7 +899,7 @@ 
class Function(object): The name of this function """ - signature = None + type_signature = None """ An ordered list of the types for each argument to the function """ @@ -935,11 +936,11 @@ class Function(object): (convenience function to avoid handling nulls explicitly if the result will just be null) """ - def __init__(self, keyspace, name, signature, argument_names, + def __init__(self, keyspace, name, type_signature, argument_names, return_type, language, body, is_deterministic, called_on_null_input): self.keyspace = keyspace self.name = name - self.signature = signature + self.type_signature = type_signature self.argument_names = argument_names self.return_type = return_type self.language = language @@ -957,7 +958,7 @@ def as_cql_query(self, formatted=False): keyspace = protect_name(self.keyspace) name = protect_name(self.name) arg_list = ', '.join(["%s %s" % (protect_name(n), t) - for n, t in zip(self.argument_names, self.signature)]) + for n, t in zip(self.argument_names, self.type_signature)]) determ = '' if self.is_deterministic else 'NON DETERMINISTIC ' typ = self.return_type.cql_parameterized_type() lang = self.language @@ -970,6 +971,10 @@ def as_cql_query(self, formatted=False): "LANGUAGE %(lang)s%(sep)s" \ "AS %(body)s;" % locals() + @property + def signature(self): + return UserFunctionDescriptor.format_signature(self.name, self.type_signature) + class TableMetadata(object): """ diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 922925a526..9ec114e740 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -23,7 +23,7 @@ from cassandra import (Unavailable, WriteTimeout, ReadTimeout, AlreadyExists, InvalidRequest, Unauthorized, - UnsupportedOperation) + UnsupportedOperation, UserFunctionDescriptor) from cassandra.marshal import (int32_pack, int32_unpack, uint16_pack, uint16_unpack, int8_pack, int8_unpack, uint64_pack, header_pack, v3_header_pack) @@ -850,15 +850,18 @@ def recv_schema_change(cls, f, protocol_version): if 
protocol_version >= 3: target = read_string(f) keyspace = read_string(f) + event = {'change_type': change_type, 'keyspace': keyspace} if target != "KEYSPACE": target_name = read_string(f) - return {'change_type': change_type, 'keyspace': keyspace, target.lower(): target_name} - else: - return {'change_type': change_type, 'keyspace': keyspace} + if target in ('FUNCTION', 'AGGREGATE'): + event['function'] = UserFunctionDescriptor(target_name, [read_string(f) for _ in range(read_short(f))]) + else: + event[target.lower()] = target_name else: keyspace = read_string(f) table = read_string(f) - return {'change_type': change_type, 'keyspace': keyspace, 'table': table} + event = {'change_type': change_type, 'keyspace': keyspace, 'table': table} + return event def write_header(f, version, flags, stream_id, opcode, length): From d2a0931c7db658d590030361f1887e61d510d3fb Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 13 Apr 2015 14:22:05 -0500 Subject: [PATCH 1329/3726] Finalize user function meta integration tests - handle signatures - new signature overload test - proper called_on_null regex --- tests/integration/standard/test_metadata.py | 56 ++++++++++++++------- 1 file changed, 37 insertions(+), 19 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index b8df0738e7..05c8ba5611 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -22,7 +22,7 @@ import six import sys -from cassandra import AlreadyExists +from cassandra import AlreadyExists, UserFunctionDescriptor from cassandra.cluster import Cluster from cassandra.metadata import (Metadata, KeyspaceMetadata, TableMetadata, @@ -959,28 +959,33 @@ def teardown_class(cls): class VerifiedFunction(object): def __init__(self, test_case, **function_kwargs): self.test_case = test_case - self.function_kwargs = function_kwargs + self.function_kwargs = dict(function_kwargs) def __enter__(self): tc = 
self.test_case expected_meta = Function(**self.function_kwargs) - tc.assertNotIn(expected_meta.name, tc.keyspace_function_meta) + tc.assertNotIn(expected_meta.signature, tc.keyspace_function_meta) tc.session.execute(expected_meta.as_cql_query()) - tc.assertIn(expected_meta.name, tc.keyspace_function_meta) + tc.assertIn(expected_meta.signature, tc.keyspace_function_meta) - generated_meta = tc.keyspace_function_meta[expected_meta.name] + generated_meta = tc.keyspace_function_meta[expected_meta.signature] self.test_case.assertEqual(generated_meta.as_cql_query(), expected_meta.as_cql_query()) + return self def __exit__(self, exc_type, exc_val, exc_tb): tc = self.test_case - function_name = self.function_kwargs['name'] - tc.session.execute("DROP FUNCTION %s.%s" % (tc.keyspace_name, function_name)) - tc.assertNotIn(function_name, tc.keyspace_function_meta) + tc.session.execute("DROP FUNCTION %s.%s" % (tc.keyspace_name, self.signature)) + tc.assertNotIn(self.signature, tc.keyspace_function_meta) + + @property + def signature(self): + return UserFunctionDescriptor.format_signature(self.function_kwargs['name'], + self.function_kwargs['type_signature']) def make_function_kwargs(self, deterministic=True, called_on_null=True): return {'keyspace': self.keyspace_name, 'name': self.function_name, - 'signature': ['double', 'int'], + 'type_signature': ['double', 'int'], 'argument_names': ['d', 'i'], 'return_type': DoubleType, 'language': 'java', @@ -1002,7 +1007,7 @@ def test_functions_after_udt(self): # this presently fails because C* c059a56 requires udt to be frozen to create, but does not store meta indicating frozen # Maybe update this after release #kwargs = self.make_function_kwargs() - #kwargs['signature'][0] = "frozen<%s>" % udt_name + #kwargs['type_signature'][0] = "frozen<%s>" % udt_name #expected_meta = Function(**kwargs) #with self.VerifiedFunction(self, **kwargs): @@ -1014,6 +1019,19 @@ def test_functions_after_udt(self): self.assertNotIn(-1, (type_idx, func_idx), 
"TYPE or FUNCTION not found in keyspace_cql: " + keyspace_cql) self.assertGreater(func_idx, type_idx) + def test_function_same_name_diff_types(self): + kwargs = self.make_function_kwargs() + with self.VerifiedFunction(self, **kwargs): + # another function: same name, different type sig. + self.assertGreater(len(kwargs['type_signature']), 1) + self.assertGreater(len(kwargs['argument_names']), 1) + kwargs['type_signature'] = kwargs['type_signature'][:1] + kwargs['argument_names'] = kwargs['argument_names'][:1] + with self.VerifiedFunction(self, **kwargs): + functions = [f for f in self.keyspace_function_meta.values() if f.name == self.function_name] + self.assertEqual(len(functions), 2) + self.assertNotEqual(functions[0].type_signature, functions[1].type_signature) + def test_functions_follow_keyspace_alter(self): with self.VerifiedFunction(self, **self.make_function_kwargs()): original_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name] @@ -1028,23 +1046,23 @@ def test_functions_follow_keyspace_alter(self): def test_function_cql_determinism(self): kwargs = self.make_function_kwargs() kwargs['is_deterministic'] = True - with self.VerifiedFunction(self, **kwargs): - fn_meta = self.keyspace_function_meta[self.function_name] + with self.VerifiedFunction(self, **kwargs) as vf: + fn_meta = self.keyspace_function_meta[vf.signature] self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*") kwargs['is_deterministic'] = False - with self.VerifiedFunction(self, **kwargs): - fn_meta = self.keyspace_function_meta[self.function_name] + with self.VerifiedFunction(self, **kwargs) as vf: + fn_meta = self.keyspace_function_meta[vf.signature] self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE NON DETERMINISTIC FUNCTION.*") def test_function_cql_called_on_null(self): kwargs = self.make_function_kwargs() kwargs['called_on_null_input'] = True - with self.VerifiedFunction(self, **kwargs): - fn_meta = self.keyspace_function_meta[self.function_name] + with 
self.VerifiedFunction(self, **kwargs) as vf: + fn_meta = self.keyspace_function_meta[vf.signature] self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) CALLED ON NULL INPUT RETURNS .*") kwargs['called_on_null_input'] = False - with self.VerifiedFunction(self, **kwargs): - fn_meta = self.keyspace_function_meta[self.function_name] - self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) NOT CALLED ON NULL INPUT RETURNS .*") + with self.VerifiedFunction(self, **kwargs) as vf: + fn_meta = self.keyspace_function_meta[vf.signature] + self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) RETURNS NULL ON NULL INPUT RETURNS .*") From 446a10687d0ef5b2b30482e3c37d57cf76b3a752 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 13 Apr 2015 15:12:48 -0500 Subject: [PATCH 1330/3726] User function API docs --- cassandra/__init__.py | 58 ++++++++++++++++++++++++------------------ cassandra/cluster.py | 31 +++++++++++++++++----- docs/api/cassandra.rst | 3 +++ 3 files changed, 61 insertions(+), 31 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index d1ad0afe0d..2d41a8eaed 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -126,6 +126,39 @@ def consistency_value_to_name(value): return ConsistencyLevel.value_to_name[value] if value is not None else "Not Set" +class UserFunctionDescriptor(object): + """ + Describes a User function or aggregate by name and argument signature + """ + + name = None + """ + name of the function + """ + + type_signature = None + """ + Ordered list of CQL argument type name comprising the type signature + """ + + def __init__(self, name, type_signature): + self.name = name + self.type_signature = type_signature + + @property + def signature(self): + """ + function signatue string in the form 'name([type0[,type1[...]]])' + + can be used to uniquely identify overloaded function names within a keyspace + """ + return self.format_signature(self.name, self.type_signature) 
+ + @staticmethod + def format_signature(name, type_signature): + return "%s(%s)" % (name, ','.join(t for t in type_signature)) + + class Unavailable(Exception): """ There were not enough live replicas to satisfy the requested consistency @@ -302,28 +335,3 @@ class UnsupportedOperation(Exception): for more details. """ pass - - -class UserFunctionDescriptor(object): - """ - Describes a User function or aggregate by name and argument signature - """ - - name = None - - type_signature = None - """ - Ordered list of CQL argument types - """ - - def __init__(self, name, type_signature): - self.name = name - self.type_signature = type_signature - - @property - def signature(self): - return self.format_signature(self.name, self.type_signature) - - @staticmethod - def format_signature(name, type_signature): - return "%s(%s)" % (name, ','.join(t for t in type_signature)) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index e1f5ec6f6c..b3172e788e 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1116,9 +1116,27 @@ def _ensure_core_connections(self): for pool in session._pools.values(): pool.ensure_core_connections() + def _validate_refresh_schema(self, keyspace, table, usertype, function): + if any((table, usertype, function)): + if not keyspace: + raise ValueError("keyspace is required to refresh specific sub-entity {table, usertype, function}") + if sum(1 for e in (table, usertype, function) if e) > 1: + raise ValueError("{table, usertype, function} are mutually exclusive") + def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None, max_schema_agreement_wait=None): """ - Synchronously refresh the schema metadata. + Synchronously refresh all schema metadata. + + {keyspace, table, usertype} are string names of the respective entities. + ``function`` is a :class:`cassandra.UserFunctionDescriptor`. + + If none of ``{keyspace, table, usertype, function}`` are specified, the entire schema is refreshed. 
+ + If any of ``{keyspace, table, usertype, function}`` are specified, ``keyspace`` is required. + + If only ``keyspace`` is specified, just the top-level keyspace metadata is refreshed (e.g. replication). + + The remaining arguments ``{table, usertype, function}`` are mutually exclusive -- only one may be specified. By default, the timeout for this operation is governed by :attr:`~.Cluster.max_schema_agreement_wait` and :attr:`~.Cluster.control_connection_timeout`. @@ -1129,17 +1147,18 @@ def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None An Exception is raised if schema refresh fails for any reason. """ + self._validate_refresh_schema(keyspace, table, usertype, function) if not self.control_connection.refresh_schema(keyspace, table, usertype, function, max_schema_agreement_wait): raise Exception("Schema was not refreshed. See log for details.") - def submit_schema_refresh(self, keyspace=None, table=None, usertype=None): + def submit_schema_refresh(self, keyspace=None, table=None, usertype=None, function=None): """ Schedule a refresh of the internal representation of the current - schema for this cluster. If `keyspace` is specified, only that - keyspace will be refreshed, and likewise for `table`. + schema for this cluster. See :meth:`~.refresh_schema` for description of parameters. 
""" + self._validate_refresh_schema(keyspace, table, usertype, function) return self.executor.submit( - self.control_connection.refresh_schema, keyspace, table, usertype) + self.control_connection.refresh_schema, keyspace, table, usertype, function) def refresh_nodes(self): """ @@ -2030,7 +2049,7 @@ def _refresh_schema(self, connection, keyspace=None, table=None, usertype=None, if self._cluster.is_shutdown: return False - assert table is None or usertype is None + assert sum(1 for arg in (table, usertype, function) if arg) <= 1 agreed = self.wait_for_schema_agreement(connection, preloaded_results=preloaded_results, diff --git a/docs/api/cassandra.rst b/docs/api/cassandra.rst index 90d23d108e..91500d0f8a 100644 --- a/docs/api/cassandra.rst +++ b/docs/api/cassandra.rst @@ -14,6 +14,9 @@ .. autoclass:: ConsistencyLevel :members: +.. autoclass:: UserFunctionDescriptor + :members: + .. autoexception:: Unavailable() :members: From bd51fb978663cee3ec77295dad9ca3b233a69564 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 14 Apr 2015 16:51:38 -0500 Subject: [PATCH 1331/3726] Add meta handling for AGGREGATE --- cassandra/__init__.py | 47 ++++++++++----- cassandra/cluster.py | 80 +++++++++++++++++-------- cassandra/metadata.py | 136 ++++++++++++++++++++++++++++++++++++++++-- cassandra/protocol.py | 7 ++- 4 files changed, 223 insertions(+), 47 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 2d41a8eaed..10fd6158a6 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -126,20 +126,7 @@ def consistency_value_to_name(value): return ConsistencyLevel.value_to_name[value] if value is not None else "Not Set" -class UserFunctionDescriptor(object): - """ - Describes a User function or aggregate by name and argument signature - """ - - name = None - """ - name of the function - """ - - type_signature = None - """ - Ordered list of CQL argument type name comprising the type signature - """ +class SignatureDescriptor(object): def 
__init__(self, name, type_signature): self.name = name @@ -159,6 +146,38 @@ def format_signature(name, type_signature): return "%s(%s)" % (name, ','.join(t for t in type_signature)) +class UserFunctionDescriptor(SignatureDescriptor): + """ + Describes a User function by name and argument signature + """ + + name = None + """ + name of the function + """ + + type_signature = None + """ + Ordered list of CQL argument type name comprising the type signature + """ + + +class UserAggregateDescriptor(SignatureDescriptor): + """ + Describes a User aggregate function by name and argument signature + """ + + name = None + """ + name of the aggregate + """ + + type_signature = None + """ + Ordered list of CQL argument type name comprising the type signature + """ + + class Unavailable(Exception): """ There were not enough live replicas to satisfy the requested consistency diff --git a/cassandra/cluster.py b/cassandra/cluster.py index b3172e788e..8832bb8f3a 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1116,27 +1116,29 @@ def _ensure_core_connections(self): for pool in session._pools.values(): pool.ensure_core_connections() - def _validate_refresh_schema(self, keyspace, table, usertype, function): - if any((table, usertype, function)): + def _validate_refresh_schema(self, keyspace, table, usertype, function, aggregate): + if any((table, usertype, function, aggregate)): if not keyspace: - raise ValueError("keyspace is required to refresh specific sub-entity {table, usertype, function}") + raise ValueError("keyspace is required to refresh specific sub-entity {table, usertype, function, aggregate}") if sum(1 for e in (table, usertype, function) if e) > 1: - raise ValueError("{table, usertype, function} are mutually exclusive") + raise ValueError("{table, usertype, function, aggregate} are mutually exclusive") - def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None, max_schema_agreement_wait=None): + def refresh_schema(self, 
keyspace=None, table=None, usertype=None, function=None, aggregate=None, max_schema_agreement_wait=None): """ - Synchronously refresh all schema metadata. + Synchronously refresh schema metadata. {keyspace, table, usertype} are string names of the respective entities. ``function`` is a :class:`cassandra.UserFunctionDescriptor`. + ``aggregate`` is a :class:`cassandra.UserAggregateDescriptor`. - If none of ``{keyspace, table, usertype, function}`` are specified, the entire schema is refreshed. + If none of ``{keyspace, table, usertype, function, aggregate}`` are specified, the entire schema is refreshed. - If any of ``{keyspace, table, usertype, function}`` are specified, ``keyspace`` is required. + If any of ``{keyspace, table, usertype, function, aggregate}`` are specified, ``keyspace`` is required. If only ``keyspace`` is specified, just the top-level keyspace metadata is refreshed (e.g. replication). - The remaining arguments ``{table, usertype, function}`` are mutually exclusive -- only one may be specified. + The remaining arguments ``{table, usertype, function, aggregate}`` + are mutually exclusive -- only one may be specified. By default, the timeout for this operation is governed by :attr:`~.Cluster.max_schema_agreement_wait` and :attr:`~.Cluster.control_connection_timeout`. @@ -1147,8 +1149,9 @@ def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None An Exception is raised if schema refresh fails for any reason. """ - self._validate_refresh_schema(keyspace, table, usertype, function) - if not self.control_connection.refresh_schema(keyspace, table, usertype, function, max_schema_agreement_wait): + self._validate_refresh_schema(keyspace, table, usertype, function, aggregate) + if not self.control_connection.refresh_schema(keyspace, table, usertype, function, + aggregate, max_schema_agreement_wait): raise Exception("Schema was not refreshed. 
See log for details.") def submit_schema_refresh(self, keyspace=None, table=None, usertype=None, function=None): @@ -1156,9 +1159,9 @@ def submit_schema_refresh(self, keyspace=None, table=None, usertype=None, functi Schedule a refresh of the internal representation of the current schema for this cluster. See :meth:`~.refresh_schema` for description of parameters. """ - self._validate_refresh_schema(keyspace, table, usertype, function) + self._validate_refresh_schema(keyspace, table, usertype, function, aggregate) return self.executor.submit( - self.control_connection.refresh_schema, keyspace, table, usertype, function) + self.control_connection.refresh_schema, keyspace, table, usertype, function, aggregate) def refresh_nodes(self): """ @@ -1840,6 +1843,7 @@ class ControlConnection(object): _SELECT_COLUMNS = "SELECT * FROM system.schema_columns" _SELECT_USERTYPES = "SELECT * FROM system.schema_usertypes" _SELECT_FUNCTIONS = "SELECT * FROM system.schema_functions" + _SELECT_AGGREGATES = "SELECT * FROM system.schema_aggregates" _SELECT_TRIGGERS = "SELECT * FROM system.schema_triggers" _SELECT_PEERS = "SELECT peer, data_center, rack, tokens, rpc_address, schema_version FROM system.peers" @@ -2028,7 +2032,7 @@ def shutdown(self): del self._connection def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None, - schema_agreement_wait=None): + aggregate=None, schema_agreement_wait=None): if not self._meta_refresh_enabled: log.debug("[control connection] Skipping schema refresh because meta refresh is disabled") return False @@ -2036,7 +2040,7 @@ def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None try: if self._connection: return self._refresh_schema(self._connection, keyspace, table, usertype, function, - schema_agreement_wait=schema_agreement_wait) + aggregate, schema_agreement_wait=schema_agreement_wait) except ReferenceError: pass # our weak reference to the Cluster is no good except Exception: @@ -2045,11 +2049,11 @@ 
def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None return False def _refresh_schema(self, connection, keyspace=None, table=None, usertype=None, function=None, - preloaded_results=None, schema_agreement_wait=None): + aggregate=None, preloaded_results=None, schema_agreement_wait=None): if self._cluster.is_shutdown: return False - assert sum(1 for arg in (table, usertype, function) if arg) <= 1 + assert sum(1 for arg in (table, usertype, function, aggregate) if arg) <= 1 agreed = self.wait_for_schema_agreement(connection, preloaded_results=preloaded_results, @@ -2102,6 +2106,15 @@ def _handle_results(success, result): log.debug("[control connection] Fetched user function info for %s.%s, rebuilding metadata", keyspace, function.signature) functions_result = dict_factory(*functions_result.results) if functions_result.results else {} self._cluster.metadata.function_changed(keyspace, function, functions_result) + elif aggregate: + # user defined aggregate within this keyspace changed + where_clause = " WHERE keyspace_name = '%s' AND aggregate_name = '%s' AND signature = [%s]" \ + % (keyspace, aggregate.name, ','.join("'%s'" % t for t in aggregate.type_signature)) + aggregates_query = QueryMessage(query=self._SELECT_AGGREGATES + where_clause, consistency_level=cl) + aggregates_result = connection.wait_for_response(aggregates_query) + log.debug("[control connection] Fetched user aggregate info for %s.%s, rebuilding metadata", keyspace, aggregate.signature) + aggregates_result = dict_factory(*aggregates_result.results) if aggregates_result.results else {} + self._cluster.metadata.aggregate_changed(keyspace, aggregate, aggregates_result) elif keyspace: # only the keyspace itself changed (such as replication settings) where_clause = " WHERE keyspace_name = '%s'" % (keyspace,) @@ -2118,6 +2131,7 @@ def _handle_results(success, result): QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl), QueryMessage(query=self._SELECT_USERTYPES, 
consistency_level=cl), QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl), + QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl), QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl) ] @@ -2125,6 +2139,7 @@ def _handle_results(success, result): (ks_success, ks_result), (cf_success, cf_result), \ (col_success, col_result), (types_success, types_result), \ (functions_success, functions_result), \ + (aggregates_success, aggregates_result), \ (trigger_success, triggers_result) = responses if ks_success: @@ -2176,8 +2191,19 @@ def _handle_results(success, result): else: raise functions_result + # aggregates were introduced in Cassandra 3.0 + if aggregates_success: + aggregates_result = dict_factory(*aggregates_result.results) if aggregates_result.results else {} + else: + if isinstance(aggregates_result, InvalidRequest): + log.debug("[control connection] user aggregates table not found") + aggregates_result = {} + else: + raise aggregates_result + log.debug("[control connection] Fetched schema, rebuilding metadata") - self._cluster.metadata.rebuild_schema(ks_result, types_result, functions_result, cf_result, col_result, triggers_result) + self._cluster.metadata.rebuild_schema(ks_result, types_result, functions_result, + aggregates_result, cf_result, col_result, triggers_result) return True def refresh_node_list_and_token_map(self, force_token_rebuild=False): @@ -2321,13 +2347,13 @@ def _handle_status_change(self, event): def _handle_schema_change(self, event): if self._schema_event_refresh_window < 0: return - keyspace = event.get('keyspace') table = event.get('table') usertype = event.get('type') function = event.get('function') + aggregate = event.get('aggregate') delay = random() * self._schema_event_refresh_window - self._cluster.scheduler.schedule_unique(delay, self.refresh_schema, keyspace, table, usertype, function) + self._cluster.scheduler.schedule_unique(delay, self.refresh_schema, keyspace, table, usertype, function, 
aggregate) def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wait_time=None): @@ -2556,19 +2582,20 @@ def _log_if_failed(self, future): exc_info=exc) -def refresh_schema_and_set_result(keyspace, table, usertype, function, control_conn, response_future): +def refresh_schema_and_set_result(keyspace, table, usertype, function, aggregate, control_conn, response_future): try: if control_conn._meta_refresh_enabled: - log.debug("Refreshing schema in response to schema change. Keyspace: %s; Table: %s, Type: %s, Function: %s", - keyspace, table, usertype, function) - control_conn._refresh_schema(response_future._connection, keyspace, table, usertype, function) + log.debug("Refreshing schema in response to schema change. " + "Keyspace: %s; Table: %s, Type: %s, Function: %s, Aggregate: %s", + keyspace, table, usertype, function, aggregate) + control_conn._refresh_schema(response_future._connection, keyspace, table, usertype, function, aggregate) else: log.debug("Skipping schema refresh in response to schema change because meta refresh is disabled; " - "Keyspace: %s; Table: %s, Type: %s, Function: %s", keyspace, table, usertype, function) + "Keyspace: %s; Table: %s, Type: %s, Function: %s", keyspace, table, usertype, function, aggregate) except Exception: log.exception("Exception refreshing schema in response to schema change:") response_future.session.submit( - control_conn.refresh_schema, keyspace, table, usertype, function) + control_conn.refresh_schema, keyspace, table, usertype, function, aggregate) finally: response_future._set_final_result(None) @@ -2754,6 +2781,7 @@ def _set_result(self, response): response.results.get('table'), response.results.get('type'), response.results.get('function'), + response.results.get('aggregate'), self.session.cluster.control_connection, self) else: diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 32edd01f9c..2b92e51408 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -30,8 +30,9 
@@ except ImportError as e: pass -from cassandra import UserFunctionDescriptor +from cassandra import SignatureDescriptor import cassandra.cqltypes as types +from cassandra.encoder import Encoder from cassandra.marshal import varint_unpack from cassandra.pool import Host from cassandra.util import OrderedDict @@ -91,7 +92,7 @@ def export_schema_as_string(self): return "\n".join(ks.export_as_string() for ks in self.keyspaces.values()) def rebuild_schema(self, ks_results, type_results, function_results, - cf_results, col_results, triggers_result): + aggregate_results, cf_results, col_results, triggers_result): """ Rebuild the view of the current schema from a fresh set of rows from the system schema tables. @@ -102,6 +103,7 @@ def rebuild_schema(self, ks_results, type_results, function_results, col_def_rows = defaultdict(lambda: defaultdict(list)) usertype_rows = defaultdict(list) fn_rows = defaultdict(list) + agg_rows = defaultdict(list) trigger_rows = defaultdict(lambda: defaultdict(list)) for row in cf_results: @@ -118,6 +120,9 @@ def rebuild_schema(self, ks_results, type_results, function_results, for row in function_results: fn_rows[row["keyspace_name"]].append(row) + for row in aggregate_results: + agg_rows[row["keyspace_name"]].append(row) + for row in triggers_result: ksname = row["keyspace_name"] cfname = row["columnfamily_name"] @@ -142,6 +147,10 @@ def rebuild_schema(self, ks_results, type_results, function_results, fn = self._build_function(keyspace_meta.name, fn_row) keyspace_meta.functions[fn.signature] = fn + for agg_row in agg_rows.get(keyspace_meta.name, []): + agg = self._build_aggregate(keyspace_meta.name, agg_row) + keyspace_meta.aggregates[agg.signature] = agg + current_keyspaces.add(keyspace_meta.name) old_keyspace_meta = self.keyspaces.get(keyspace_meta.name, None) self.keyspaces[keyspace_meta.name] = keyspace_meta @@ -172,6 +181,7 @@ def keyspace_changed(self, keyspace, ks_results): keyspace_meta.tables = old_keyspace_meta.tables 
keyspace_meta.user_types = old_keyspace_meta.user_types keyspace_meta.functions = old_keyspace_meta.functions + keyspace_meta.aggregates = old_keyspace_meta.aggregates if (keyspace_meta.replication_strategy != old_keyspace_meta.replication_strategy): self._keyspace_updated(keyspace) else: @@ -193,6 +203,14 @@ def function_changed(self, keyspace, function, function_results): # the function was deleted self.keyspaces[keyspace].functions.pop(function.signature, None) + def aggregate_changed(self, keyspace, aggregate, aggregate_results): + if aggregate_results: + new_aggregate = self._build_aggregate(keyspace, aggregate_results[0]) + self.keyspaces[keyspace].aggregates[aggregate.signature] = new_aggregate + else: + # the aggregate was deleted + self.keyspaces[keyspace].aggregates.pop(aggregate.signature, None) + def table_changed(self, keyspace, table, cf_results, col_results, triggers_result): try: keyspace_meta = self.keyspaces[keyspace] @@ -242,6 +260,16 @@ def _build_function(self, keyspace, function_row): return_type, function_row['language'], function_row['body'], function_row['is_deterministic'], function_row['called_on_null_input']) + def _build_aggregate(self, keyspace, aggregate_row): + state_type = types.lookup_casstype(aggregate_row['state_type']) + initial_condition = aggregate_row['initcond'] + if initial_condition is not None: + initial_condition = state_type.deserialize(initial_condition, 3) + return_type = types.lookup_casstype(aggregate_row['return_type']) + return Aggregate(aggregate_row['keyspace_name'], aggregate_row['aggregate_name'], + aggregate_row['signature'], aggregate_row['final_func'], initial_condition, + return_type, aggregate_row['state_func'], state_type) + def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): cfname = row["columnfamily_name"] cf_col_rows = col_rows.get(cfname, []) @@ -762,14 +790,21 @@ class KeyspaceMetadata(object): user_types = None """ - A map from user-defined type names to instances of 
:class:`~cassandra.metadata..UserType`. + A map from user-defined type names to instances of :class:`~cassandra.metadata.UserType`. .. versionadded:: 2.1.0 """ functions = None """ - A map from user-defined function names to instances of :class:`~cassandra.metadata..Function`. + A map from user-defined function signatures to instances of :class:`~cassandra.metadata.Function`. + + .. versionadded:: 3.0.0 + """ + + aggregates = None + """ + A map from user-defined aggregate signatures to instances of :class:`~cassandra.metadata.Aggregate`. .. versionadded:: 3.0.0 """ @@ -780,6 +815,7 @@ def __init__(self, name, durable_writes, strategy_class, strategy_options): self.tables = {} self.user_types = {} self.functions = {} + self.aggregates = {} def export_as_string(self): """ @@ -789,6 +825,7 @@ def export_as_string(self): return "\n\n".join([self.as_cql_query()] + self.user_type_strings() + [f.as_cql_query(True) for f in self.functions.values()] + + [a.as_cql_query(True) for a in self.aggregates.values()] + [t.export_as_string() for t in self.tables.values()]) def as_cql_query(self): @@ -880,6 +917,95 @@ def as_cql_query(self, formatted=False): return ret +class Aggregate(object): + """ + A user defined aggregate function, as created by ``CREATE AGGREGATE`` statements. + + Aggregate functions were introduced in Cassandra 3.0 + + .. 
versionadded:: 3.0.0 + """ + + keyspace = None + """ + The string name of the keyspace in which this aggregate is defined + """ + + name = None + """ + The name of this aggregate + """ + + type_signature = None + """ + An ordered list of the types for each argument to the aggregate + """ + + final_func = None + """ + Name of a final function + """ + + initial_condition = None + """ + Initial condition of the aggregate + """ + + return_type = None + """ + Return type of the aggregate + """ + + state_func = None + """ + Name of a state function + """ + + state_type = None + """ + Flag indicating whether this function is deterministic + (required for functional indexes) + """ + + def __init__(self, keyspace, name, type_signature, final_func, + initial_condition, return_type, state_func, state_type): + self.keyspace = keyspace + self.name = name + self.type_signature = type_signature + self.final_func = final_func + self.initial_condition = initial_condition + self.return_type = return_type + self.state_func = state_func + self.state_type = state_type + + def as_cql_query(self, formatted=False): + """ + Returns a CQL query that can be used to recreate this aggregate. + If `formatted` is set to :const:`True`, extra whitespace will + be added to make the query more readable. 
+ """ + sep = '\n' if formatted else ' ' + keyspace = protect_name(self.keyspace) + name = protect_name(self.name) + arg_list = ', '.join(self.type_signature) + state_func = protect_name(self.state_func) + state_type = self.state_type.cql_parameterized_type() + + ret = "CREATE AGGREGATE %(keyspace)s.%(name)s(%(arg_list)s)%(sep)s" \ + "SFUNC %(state_func)s%(sep)s" \ + "STYPE %(state_type)s" % locals() + + ret += ''.join((sep, 'FINALFUNC ', protect_name(self.final_func))) if self.final_func else '' + ret += ''.join((sep, 'INITCOND ', Encoder().cql_encode_all_types(self.initial_condition)))\ + if self.initial_condition is not None else '' + + return ret + + @property + def signature(self): + return SignatureDescriptor.format_signature(self.name, self.type_signature) + + class Function(object): """ A user defined function, as created by ``CREATE FUNCTION`` statements. @@ -973,7 +1099,7 @@ def as_cql_query(self, formatted=False): @property def signature(self): - return UserFunctionDescriptor.format_signature(self.name, self.type_signature) + return SignatureDescriptor.format_signature(self.name, self.type_signature) class TableMetadata(object): diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 9ec114e740..c6c34615a4 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -23,7 +23,8 @@ from cassandra import (Unavailable, WriteTimeout, ReadTimeout, AlreadyExists, InvalidRequest, Unauthorized, - UnsupportedOperation, UserFunctionDescriptor) + UnsupportedOperation, UserFunctionDescriptor, + UserAggregateDescriptor) from cassandra.marshal import (int32_pack, int32_unpack, uint16_pack, uint16_unpack, int8_pack, int8_unpack, uint64_pack, header_pack, v3_header_pack) @@ -853,8 +854,10 @@ def recv_schema_change(cls, f, protocol_version): event = {'change_type': change_type, 'keyspace': keyspace} if target != "KEYSPACE": target_name = read_string(f) - if target in ('FUNCTION', 'AGGREGATE'): + if target == 'FUNCTION': event['function'] = 
UserFunctionDescriptor(target_name, [read_string(f) for _ in range(read_short(f))]) + elif target == 'AGGREGATE': + event['aggregate'] = UserAggregateDescriptor(target_name, [read_string(f) for _ in range(read_short(f))]) else: event[target.lower()] = target_name else: From a0482216e8df887d3aa1276703cd3d87239f8148 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 14 Apr 2015 16:52:21 -0500 Subject: [PATCH 1332/3726] Doc update for function and aggregate metadata --- docs/api/cassandra.rst | 5 +++++ docs/api/cassandra/metadata.rst | 9 +++++++++ 2 files changed, 14 insertions(+) diff --git a/docs/api/cassandra.rst b/docs/api/cassandra.rst index 91500d0f8a..87ef7fe273 100644 --- a/docs/api/cassandra.rst +++ b/docs/api/cassandra.rst @@ -16,6 +16,11 @@ .. autoclass:: UserFunctionDescriptor :members: + :inherited-members: + +.. autoclass:: UserAggregateDescriptor + :members: + :inherited-members: .. autoexception:: Unavailable() :members: diff --git a/docs/api/cassandra/metadata.rst b/docs/api/cassandra/metadata.rst index 2d3b09f766..a89fd03ddb 100644 --- a/docs/api/cassandra/metadata.rst +++ b/docs/api/cassandra/metadata.rst @@ -13,6 +13,15 @@ Schemas .. autoclass:: KeyspaceMetadata () :members: +.. autoclass:: UserType () + :members: + +.. autoclass:: Function () + :members: + +.. autoclass:: Aggregate () + :members: + .. 
autoclass:: TableMetadata () :members: From cf486b48ce4cae7716d4347d3d171447f40ff6ad Mon Sep 17 00:00:00 2001 From: Anthony Cervantes Date: Tue, 14 Apr 2015 19:08:41 -0500 Subject: [PATCH 1333/3726] fixed typo --- cassandra/cqlengine/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 35d10bb15d..0baa0906a0 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -705,7 +705,7 @@ def update(self, **values): if self._is_polymorphic_base: raise PolymorphicModelException('cannot update polymorphic base model') else: - setattr(self, self._discriminator_column_name, self.__disciminator_value__) + setattr(self, self._discriminator_column_name, self.__discriminator_value__) self.validate() self.__dmlquery__(self.__class__, self, From 37eb549b06a2bbd1f42ca26353a5f83a162c4109 Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Wed, 15 Apr 2015 16:33:06 +0200 Subject: [PATCH 1334/3726] Fix class attribute name. --- docs/cqlengine/upgrade_guide.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/cqlengine/upgrade_guide.rst b/docs/cqlengine/upgrade_guide.rst index 749106de5b..0657a0719c 100644 --- a/docs/cqlengine/upgrade_guide.rst +++ b/docs/cqlengine/upgrade_guide.rst @@ -95,7 +95,7 @@ Model Inheritance The names for class attributes controlling model inheritance are changing. Changes are as follows: - Replace 'polymorphic_key' in the base class Column definition with :attr:`~.discriminator_column` -- Replace the '__polymporphic_key__' class attribute the derived classes with :attr:`~.__discriminator_value__` +- Replace the '__polymorphic_key__' class attribute the derived classes with :attr:`~.__discriminator_value__` The functionality is unchanged -- the intent here is to make the names and language around these attributes more precise. For now, the old names are just deprecated, and the mapper will emit warnings if they are used. 
The old names From e5943882f2b99b920bbbff2d06920b01e09354bb Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Wed, 15 Apr 2015 16:33:17 +0200 Subject: [PATCH 1335/3726] Remove trailing spaces. --- docs/cqlengine/upgrade_guide.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/cqlengine/upgrade_guide.rst b/docs/cqlengine/upgrade_guide.rst index 0657a0719c..3488902790 100644 --- a/docs/cqlengine/upgrade_guide.rst +++ b/docs/cqlengine/upgrade_guide.rst @@ -41,7 +41,7 @@ Package-Level Aliases Legacy cqlengine defined a number of aliases at the package level, which became redundant when the package was integrated for a driver. These have been removed in favor of absolute imports, and referring to cannonical definitions. For example, ``cqlengine.ONE`` was an alias -of ``cassandra.ConsistencyLevel.ONE``. In the integrated package, only the +of ``cassandra.ConsistencyLevel.ONE``. In the integrated package, only the :class:`cassandra.ConsistencyLevel` remains. Additionally, submodule aliases are removed from cqlengine in favor of absolute imports. @@ -70,7 +70,7 @@ IfNotExistsWithCounterColumn cassandra.cqlengine.query UnicodeMixin Consolidation -------------------------- ``class UnicodeMixin`` was defined in several cqlengine modules. This has been consolidated -to a single definition in the cqlengine package init file. This is not technically part of +to a single definition in the cqlengine package init file. This is not technically part of the API, but noted here for completeness. API Deprecations @@ -97,7 +97,7 @@ The names for class attributes controlling model inheritance are changing. Chang - Replace 'polymorphic_key' in the base class Column definition with :attr:`~.discriminator_column` - Replace the '__polymorphic_key__' class attribute the derived classes with :attr:`~.__discriminator_value__` -The functionality is unchanged -- the intent here is to make the names and language around these attributes more precise. 
+The functionality is unchanged -- the intent here is to make the names and language around these attributes more precise. For now, the old names are just deprecated, and the mapper will emit warnings if they are used. The old names will be removed in a future version. @@ -117,7 +117,7 @@ Before:: class Dog(Pet): __polymorphic_key__ = 'dog' - + After:: class Pet(models.Model): From 3ab95f0adf09ff2ebcbb37271ca28ac418217aa2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 16 Apr 2015 09:23:45 -0500 Subject: [PATCH 1336/3726] Add OrderedMapSerializedKey to cql encoder mapping This is to correctly encode values that are originally returned from a query. --- cassandra/encoder.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 02eed2aa22..d9c3e85212 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -28,7 +28,8 @@ from uuid import UUID import six -from cassandra.util import OrderedDict, OrderedMap, sortedset, Time +from cassandra.util import (OrderedDict, OrderedMap, OrderedMapSerializedKey, + sortedset, Time) if six.PY3: long = int @@ -79,6 +80,7 @@ def __init__(self): dict: self.cql_encode_map_collection, OrderedDict: self.cql_encode_map_collection, OrderedMap: self.cql_encode_map_collection, + OrderedMapSerializedKey: self.cql_encode_map_collection, list: self.cql_encode_list_collection, tuple: self.cql_encode_list_collection, set: self.cql_encode_set_collection, From e17c0fe2ee09f300141bb5d7d323e61e88c89c59 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 16 Apr 2015 09:25:55 -0500 Subject: [PATCH 1337/3726] Reorder aggregate args to match grammar order, not table column order. 
--- cassandra/metadata.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 2b92e51408..b6e0aee7b3 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -267,8 +267,8 @@ def _build_aggregate(self, keyspace, aggregate_row): initial_condition = state_type.deserialize(initial_condition, 3) return_type = types.lookup_casstype(aggregate_row['return_type']) return Aggregate(aggregate_row['keyspace_name'], aggregate_row['aggregate_name'], - aggregate_row['signature'], aggregate_row['final_func'], initial_condition, - return_type, aggregate_row['state_func'], state_type) + aggregate_row['signature'], aggregate_row['state_func'], state_type, + aggregate_row['final_func'], initial_condition, return_type) def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): cfname = row["columnfamily_name"] @@ -967,16 +967,16 @@ class Aggregate(object): (required for functional indexes) """ - def __init__(self, keyspace, name, type_signature, final_func, - initial_condition, return_type, state_func, state_type): + def __init__(self, keyspace, name, type_signature, state_func, + state_type, final_func, initial_condition, return_type): self.keyspace = keyspace self.name = name self.type_signature = type_signature + self.state_func = state_func + self.state_type = state_type self.final_func = final_func self.initial_condition = initial_condition self.return_type = return_type - self.state_func = state_func - self.state_type = state_type def as_cql_query(self, formatted=False): """ @@ -987,11 +987,11 @@ def as_cql_query(self, formatted=False): sep = '\n' if formatted else ' ' keyspace = protect_name(self.keyspace) name = protect_name(self.name) - arg_list = ', '.join(self.type_signature) + type_list = ', '.join(self.type_signature) state_func = protect_name(self.state_func) state_type = self.state_type.cql_parameterized_type() - ret = "CREATE AGGREGATE 
%(keyspace)s.%(name)s(%(arg_list)s)%(sep)s" \ + ret = "CREATE AGGREGATE %(keyspace)s.%(name)s(%(type_list)s)%(sep)s" \ "SFUNC %(state_func)s%(sep)s" \ "STYPE %(state_type)s" % locals() From 5e5deb17419e4eb3fdefe490c765e536c66f69a8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 16 Apr 2015 09:46:54 -0500 Subject: [PATCH 1338/3726] Aggregate metadata integration tests --- cassandra/cluster.py | 2 +- tests/integration/standard/test_metadata.py | 224 +++++++++++++++++--- 2 files changed, 199 insertions(+), 27 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 8832bb8f3a..e8697120e3 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1154,7 +1154,7 @@ def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None aggregate, max_schema_agreement_wait): raise Exception("Schema was not refreshed. See log for details.") - def submit_schema_refresh(self, keyspace=None, table=None, usertype=None, function=None): + def submit_schema_refresh(self, keyspace=None, table=None, usertype=None, function=None, aggregate=None): """ Schedule a refresh of the internal representation of the current schema for this cluster. See :meth:`~.refresh_schema` for description of parameters. 
diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 05c8ba5611..9c80a2eef0 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -22,11 +22,13 @@ import six import sys -from cassandra import AlreadyExists, UserFunctionDescriptor +from cassandra import AlreadyExists, SignatureDescriptor from cassandra.cluster import Cluster +from cassandra.cqltypes import DoubleType, Int32Type, ListType, UTF8Type, MapType +from cassandra.encoder import Encoder from cassandra.metadata import (Metadata, KeyspaceMetadata, TableMetadata, - Token, MD5Token, TokenMap, murmur3) + Token, MD5Token, TokenMap, murmur3, Function, Aggregate) from cassandra.policies import SimpleConvictionPolicy from cassandra.pool import Host @@ -929,14 +931,11 @@ def test_keyspace_alter(self): self.assertNotEqual(original_keyspace_meta, new_keyspace_meta) self.assertEqual(new_keyspace_meta.durable_writes, False) -from cassandra.cqltypes import DoubleType -from cassandra.metadata import Function - - -class FunctionMetadata(unittest.TestCase): - - keyspace_name = "functionmetadatatest" +class FunctionTest(unittest.TestCase): + """ + Base functionality for Function and Aggregate metadata test classes + """ @property def function_name(self): return self._testMethodName.lower() @@ -947,40 +946,57 @@ def setup_class(cls): raise unittest.SkipTest("Function metadata requires native protocol version 4+") cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.keyspace_name = cls.__name__.lower() cls.session = cls.cluster.connect() - cls.session.execute("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name) + cls.session.execute("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name) + cls.session.set_keyspace(cls.keyspace_name) cls.keyspace_function_meta = 
cls.cluster.metadata.keyspaces[cls.keyspace_name].functions + cls.keyspace_aggregate_meta = cls.cluster.metadata.keyspaces[cls.keyspace_name].aggregates @classmethod def teardown_class(cls): - cls.session.execute("DROP KEYSPACE %s" % cls.keyspace_name) + cls.session.execute("DROP KEYSPACE IF EXISTS %s" % cls.keyspace_name) cls.cluster.shutdown() - class VerifiedFunction(object): - def __init__(self, test_case, **function_kwargs): + class Verified(object): + + def __init__(self, test_case, meta_class, element_meta, **function_kwargs): self.test_case = test_case self.function_kwargs = dict(function_kwargs) + self.meta_class = meta_class + self.element_meta = element_meta def __enter__(self): tc = self.test_case - expected_meta = Function(**self.function_kwargs) - tc.assertNotIn(expected_meta.signature, tc.keyspace_function_meta) + expected_meta = self.meta_class(**self.function_kwargs) + tc.assertNotIn(expected_meta.signature, self.element_meta) tc.session.execute(expected_meta.as_cql_query()) - tc.assertIn(expected_meta.signature, tc.keyspace_function_meta) + tc.assertIn(expected_meta.signature, self.element_meta) - generated_meta = tc.keyspace_function_meta[expected_meta.signature] + generated_meta = self.element_meta[expected_meta.signature] self.test_case.assertEqual(generated_meta.as_cql_query(), expected_meta.as_cql_query()) return self def __exit__(self, exc_type, exc_val, exc_tb): tc = self.test_case - tc.session.execute("DROP FUNCTION %s.%s" % (tc.keyspace_name, self.signature)) - tc.assertNotIn(self.signature, tc.keyspace_function_meta) + tc.session.execute("DROP %s %s.%s" % (self.meta_class.__name__, tc.keyspace_name, self.signature)) + tc.assertNotIn(self.signature, self.element_meta) @property def signature(self): - return UserFunctionDescriptor.format_signature(self.function_kwargs['name'], - self.function_kwargs['type_signature']) + return SignatureDescriptor.format_signature(self.function_kwargs['name'], + self.function_kwargs['type_signature']) + + 
class VerifiedFunction(Verified): + def __init__(self, test_case, **kwargs): + super(FunctionTest.VerifiedFunction, self).__init__(test_case, Function, test_case.keyspace_function_meta, **kwargs) + + class VerifiedAggregate(Verified): + def __init__(self, test_case, **kwargs): + super(FunctionTest.VerifiedAggregate, self).__init__(test_case, Aggregate, test_case.keyspace_aggregate_meta, **kwargs) + + +class FunctionMetadata(FunctionTest): def make_function_kwargs(self, deterministic=True, called_on_null=True): return {'keyspace': self.keyspace_name, @@ -993,18 +1009,15 @@ def make_function_kwargs(self, deterministic=True, called_on_null=True): 'is_deterministic': deterministic, 'called_on_null_input': called_on_null} - def test_create_drop_function(self): - with self.VerifiedFunction(self, **self.make_function_kwargs()): - pass - def test_functions_after_udt(self): self.assertNotIn(self.function_name, self.keyspace_function_meta) udt_name = 'udtx' - self.session.execute("CREATE TYPE %s.%s (x int)" % (self.keyspace_name, udt_name)) + self.session.execute("CREATE TYPE %s (x int)" % udt_name) # Ideally we would make a function that takes a udt type, but # this presently fails because C* c059a56 requires udt to be frozen to create, but does not store meta indicating frozen + # https://issues.apache.org/jira/browse/CASSANDRA-9186 # Maybe update this after release #kwargs = self.make_function_kwargs() #kwargs['type_signature'][0] = "frozen<%s>" % udt_name @@ -1066,3 +1079,162 @@ def test_function_cql_called_on_null(self): with self.VerifiedFunction(self, **kwargs) as vf: fn_meta = self.keyspace_function_meta[vf.signature] self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) RETURNS NULL ON NULL INPUT RETURNS .*") + + +class AggregateMetadata(FunctionTest): + + @classmethod + def setup_class(cls): + super(AggregateMetadata, cls).setup_class() + + cls.session.execute("""CREATE OR REPLACE FUNCTION sum_int(s int, i int) + RETURNS NULL ON NULL INPUT + RETURNS 
int + LANGUAGE javascript AS 's + i';""") + cls.session.execute("""CREATE OR REPLACE FUNCTION sum_int_two(s int, i int, j int) + RETURNS NULL ON NULL INPUT + RETURNS int + LANGUAGE javascript AS 's + i + j';""") + cls.session.execute("""CREATE OR REPLACE FUNCTION "List_As_String"(l list) + RETURNS NULL ON NULL INPUT + RETURNS int + LANGUAGE javascript AS ''''' + l';""") + cls.session.execute("""CREATE OR REPLACE FUNCTION extend_list(s list, i int) + CALLED ON NULL INPUT + RETURNS list + LANGUAGE java AS 'if (i != null) s.add(i.toString()); return s;';""") + cls.session.execute("""CREATE OR REPLACE FUNCTION update_map(s map, i int) + RETURNS NULL ON NULL INPUT + RETURNS map + LANGUAGE java AS 's.put(new Integer(i), new Integer(i)); return s;';""") + cls.session.execute("""CREATE TABLE IF NOT EXISTS t + (k int PRIMARY KEY, v int)""") + for x in range(4): + cls.session.execute("INSERT INTO t (k,v) VALUES (%s, %s)", (x, x)) + cls.session.execute("INSERT INTO t (k) VALUES (%s)", (4,)) + + def make_aggregate_kwargs(self, state_func, state_type, final_func=None, init_cond=None): + return {'keyspace': self.keyspace_name, + 'name': self.function_name + '_aggregate', + 'type_signature': ['int'], + 'state_func': state_func, + 'state_type': state_type, + 'final_func': final_func, + 'initial_condition': init_cond, + 'return_type': "does not matter for creation"} + + def test_return_type_meta(self): + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', Int32Type, init_cond=1)) as va: + self.assertIs(self.keyspace_aggregate_meta[va.signature].return_type, Int32Type) + + def test_init_cond(self): + # This is required until the java driver bundled with C* is updated to support v4 + c = Cluster(protocol_version=3) + s = c.connect(self.keyspace_name) + + expected_values = range(4) + + # int32 + for init_cond in (-1, 0, 1): + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', Int32Type, init_cond=init_cond)) as va: + sum_res = 
s.execute("SELECT %s(v) AS sum FROM t" % va.function_kwargs['name'])[0].sum + self.assertEqual(sum_res, init_cond + sum(expected_values)) + + # list + for init_cond in ([], ['1', '2']): + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('extend_list', ListType.apply_parameters([UTF8Type]), init_cond=init_cond)) as va: + list_res = s.execute("SELECT %s(v) AS list_res FROM t" % va.function_kwargs['name'])[0].list_res + self.assertListEqual(list_res[:len(init_cond)], init_cond) + self.assertEqual(set(i for i in list_res[len(init_cond):]), + set(str(i) for i in expected_values)) + + # map + expected_map_values = dict((i, i) for i in expected_values) + expected_key_set = set(expected_values) + for init_cond in ({}, {1: 2, 3: 4}, {5: 5}): + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('update_map', MapType.apply_parameters([Int32Type, Int32Type]), init_cond=init_cond)) as va: + map_res = s.execute("SELECT %s(v) AS map_res FROM t" % va.function_kwargs['name'])[0].map_res + self.assertDictContainsSubset(expected_map_values, map_res) + init_not_updated = dict((k, init_cond[k]) for k in set(init_cond) - expected_key_set) + self.assertDictContainsSubset(init_not_updated, map_res) + c.shutdown() + + def test_aggregates_after_functions(self): + # functions must come before functions in keyspace dump + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('extend_list', ListType.apply_parameters([UTF8Type]))): + keyspace_cql = self.cluster.metadata.keyspaces[self.keyspace_name].export_as_string() + func_idx = keyspace_cql.find("CREATE FUNCTION") + aggregate_idx = keyspace_cql.rfind("CREATE AGGREGATE") + self.assertNotIn(-1, (aggregate_idx, func_idx), "AGGREGATE or FUNCTION not found in keyspace_cql: " + keyspace_cql) + self.assertGreater(aggregate_idx, func_idx) + + def test_same_name_diff_types(self): + kwargs = self.make_aggregate_kwargs('sum_int', Int32Type, init_cond=0) + with self.VerifiedAggregate(self, **kwargs): + 
kwargs['state_func'] = 'sum_int_two' + kwargs['type_signature'] = ['int', 'int'] + with self.VerifiedAggregate(self, **kwargs): + aggregates = [a for a in self.keyspace_aggregate_meta.values() if a.name == kwargs['name']] + self.assertEqual(len(aggregates), 2) + self.assertNotEqual(aggregates[0].type_signature, aggregates[1].type_signature) + + def test_aggregates_follow_keyspace_alter(self): + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', Int32Type, init_cond=0)): + original_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name] + self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name) + try: + new_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name] + self.assertNotEqual(original_keyspace_meta, new_keyspace_meta) + self.assertIs(original_keyspace_meta.aggregates, new_keyspace_meta.aggregates) + finally: + self.session.execute('ALTER KEYSPACE %s WITH durable_writes = true' % self.keyspace_name) + + def test_cql_optional_params(self): + kwargs = self.make_aggregate_kwargs('extend_list', ListType.apply_parameters([UTF8Type])) + + # no initial condition, final func + self.assertIsNone(kwargs['initial_condition']) + self.assertIsNone(kwargs['final_func']) + with self.VerifiedAggregate(self, **kwargs) as va: + meta = self.keyspace_aggregate_meta[va.signature] + self.assertIsNone(meta.initial_condition) + self.assertIsNone(meta.final_func) + cql = meta.as_cql_query() + self.assertEqual(cql.find('INITCOND'), -1) + self.assertEqual(cql.find('FINALFUNC'), -1) + + # initial condition, no final func + kwargs['initial_condition'] = ['init', 'cond'] + with self.VerifiedAggregate(self, **kwargs) as va: + meta = self.keyspace_aggregate_meta[va.signature] + self.assertListEqual(meta.initial_condition, kwargs['initial_condition']) + self.assertIsNone(meta.final_func) + cql = meta.as_cql_query() + search_string = "INITCOND %s" % Encoder().cql_encode_all_types(kwargs['initial_condition']) + 
self.assertGreater(cql.find(search_string), 0, '"%s" search string not found in cql:\n%s' % (search_string, cql)) + self.assertEqual(cql.find('FINALFUNC'), -1) + + # no initial condition, final func + kwargs['initial_condition'] = None + kwargs['final_func'] = 'List_As_String' + with self.VerifiedAggregate(self, **kwargs) as va: + meta = self.keyspace_aggregate_meta[va.signature] + self.assertIsNone(meta.initial_condition) + self.assertEqual(meta.final_func, kwargs['final_func']) + cql = meta.as_cql_query() + self.assertEqual(cql.find('INITCOND'), -1) + search_string = 'FINALFUNC "%s"' % kwargs['final_func'] + self.assertGreater(cql.find(search_string), 0, '"%s" search string not found in cql:\n%s' % (search_string, cql)) + + # both + kwargs['initial_condition'] = ['init', 'cond'] + kwargs['final_func'] = 'List_As_String' + with self.VerifiedAggregate(self, **kwargs) as va: + meta = self.keyspace_aggregate_meta[va.signature] + self.assertListEqual(meta.initial_condition, kwargs['initial_condition']) + self.assertEqual(meta.final_func, kwargs['final_func']) + cql = meta.as_cql_query() + init_cond_idx = cql.find("INITCOND %s" % Encoder().cql_encode_all_types(kwargs['initial_condition'])) + final_func_idx = cql.find('FINALFUNC "%s"' % kwargs['final_func']) + self.assertNotIn(-1, (init_cond_idx, final_func_idx)) + self.assertGreater(init_cond_idx, final_func_idx) From 51d0a53c4c05696dc3dba2e562b1ea63ada69e88 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 16 Apr 2015 10:37:32 -0500 Subject: [PATCH 1339/3726] Remove link to disabled cqlengine issues --- docs/cqlengine/third_party.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/cqlengine/third_party.rst b/docs/cqlengine/third_party.rst index 89f23eccb8..c4c99dbf54 100644 --- a/docs/cqlengine/third_party.rst +++ b/docs/cqlengine/third_party.rst @@ -31,9 +31,6 @@ Here's how, in substance, CQLengine can be plugged to `Celery app = Celery() -For more details, see `issue #237 -`_. 
- uWSGI ----- From f5d5f905e7cb7db97b4c06cbd314728c2acf410b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 16 Apr 2015 15:34:42 -0500 Subject: [PATCH 1340/3726] Removed unused parameters from PreparedStatement init --- cassandra/query.py | 9 ++------- tests/unit/test_parameter_binding.py | 9 ++++----- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/cassandra/query.py b/cassandra/query.py index fc71b8f62b..031a1684e5 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -338,19 +338,14 @@ class PreparedStatement(object): fetch_size = FETCH_SIZE_UNSET - def __init__(self, column_metadata, query_id, routing_key_indexes, query, keyspace, - protocol_version, consistency_level=None, serial_consistency_level=None, - fetch_size=FETCH_SIZE_UNSET): + def __init__(self, column_metadata, query_id, routing_key_indexes, query, + keyspace, protocol_version): self.column_metadata = column_metadata self.query_id = query_id self.routing_key_indexes = routing_key_indexes self.query_string = query self.keyspace = keyspace self.protocol_version = protocol_version - self.consistency_level = consistency_level - self.serial_consistency_level = serial_consistency_level - if fetch_size is not FETCH_SIZE_UNSET: - self.fetch_size = fetch_size @classmethod def from_message(cls, query_id, column_metadata, pk_indexes, cluster_metadata, query, prepared_keyspace, protocol_version): diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index f649c5207c..3fce7b9e91 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -128,8 +128,8 @@ def test_inherit_fetch_size(self): routing_key_indexes=[], query=None, keyspace=keyspace, - protocol_version=2, - fetch_size=1234) + protocol_version=2) + prepared_statement.fetch_size = 1234 bound_statement = BoundStatement(prepared_statement=prepared_statement) self.assertEqual(1234, bound_statement.fetch_size) @@ -147,10 +147,9 @@ def 
test_too_few_parameters_for_key(self): routing_key_indexes=[0, 1], query=None, keyspace=keyspace, - protocol_version=2, - fetch_size=1234) + protocol_version=2) self.assertRaises(ValueError, prepared_statement.bind, (1,)) - bound = prepared_statement.bind((1,2)) + bound = prepared_statement.bind((1, 2)) self.assertEqual(bound.keyspace, keyspace) From 8772e753cdf06a49822a9cf0f9a3819faf7f890b Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Thu, 16 Apr 2015 14:55:26 -0700 Subject: [PATCH 1341/3726] Don't run collection index tests on C* < 2.1 --- tests/integration/standard/test_metadata.py | 46 ++++----------------- 1 file changed, 9 insertions(+), 37 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index e5b2eab3ce..0afc15b559 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -46,47 +46,16 @@ class SchemaMetadataTests(unittest.TestCase): def cfname(self): return self._testMethodName.lower() - @classmethod - def setup_class(cls): - cluster = Cluster(protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - try: - results = session.execute("SELECT keyspace_name FROM system.schema_keyspaces") - existing_keyspaces = [row[0] for row in results] - if cls.ksname in existing_keyspaces: - session.execute("DROP KEYSPACE %s" % cls.ksname) - - session.execute( - """ - CREATE KEYSPACE %s - WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}; - """ % cls.ksname) - finally: - cluster.shutdown() - - @classmethod - def teardown_class(cls): - cluster = Cluster(['127.0.0.1'], - protocol_version=PROTOCOL_VERSION) - session = cluster.connect() - try: - session.execute("DROP KEYSPACE %s" % cls.ksname) - finally: - cluster.shutdown() - def setUp(self): - self.cluster = Cluster(['127.0.0.1'], - protocol_version=PROTOCOL_VERSION) + self._cass_version, self._cql_version = get_server_versions() + + self.cluster = 
Cluster(protocol_version=PROTOCOL_VERSION) self.session = self.cluster.connect() + self.session.execute("CREATE KEYSPACE schemametadatatest WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}") def tearDown(self): - try: - self.session.execute( - """ - DROP TABLE {ksname}.{cfname} - """.format(ksname=self.ksname, cfname=self.cfname)) - finally: - self.cluster.shutdown() + self.session.execute("DROP KEYSPACE schemametadatatest") + self.cluster.shutdown() def make_create_statement(self, partition_cols, clustering_cols=None, other_cols=None, compact=False): clustering_cols = clustering_cols or [] @@ -309,6 +278,9 @@ def test_indexes(self): self.assertIn('CREATE INDEX e_index', statement) def test_collection_indexes(self): + if get_server_versions()[0] < (2, 1, 0): + raise unittest.SkipTest("Secondary index on collections were introduced in Cassandra 2.1") + self.session.execute("CREATE TABLE %s.%s (a int PRIMARY KEY, b map)" % (self.ksname, self.cfname)) self.session.execute("CREATE INDEX index1 ON %s.%s (keys(b))" From 41d9394c077dccba4146d334b675a22fd4a682df Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Thu, 16 Apr 2015 15:25:14 -0700 Subject: [PATCH 1342/3726] Don't change blob params for assertion in C* 1.2 --- tests/integration/standard/test_types.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index b256e87595..c4821d9c87 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -57,7 +57,7 @@ def tearDown(self): def test_can_insert_blob_type_as_string(self): """ - Tests that blob type in Cassandra does not map to string in Python + Tests that byte strings in Python maps to blob type in Cassandra """ c = Cluster(protocol_version=PROTOCOL_VERSION) @@ -68,9 +68,7 @@ def test_can_insert_blob_type_as_string(self): params = ['key1', b'blobyblob'] query = "INSERT INTO 
blobstring (a, b) VALUES (%s, %s)" - # In python 3, the 'bytes' type is treated as a blob, so we can - # correctly encode it with hex notation. - # In python2, we don't treat the 'str' type as a blob, so we'll encode it + # In python2, with Cassandra > 2.0, we don't treat the 'byte str' type as a blob, so we'll encode it # as a string literal and have the following failure. if six.PY2 and self._cql_version >= (3, 1, 0): # Blob values can't be specified using string notation in CQL 3.1.0 and @@ -81,10 +79,14 @@ def test_can_insert_blob_type_as_string(self): msg = r'.*Invalid STRING constant \(.*?\) for b of type blob.*' self.assertRaisesRegexp(InvalidRequest, msg, s.execute, query, params) return - elif six.PY2: - params[1] = params[1].encode('hex') - s.execute(query, params) + # In python2, with Cassandra < 2.0, we can manually encode the 'byte str' type as hex for insertion in a blob. + if six.PY2: + cass_params = [params[0], params[1].encode('hex')] + s.execute(query, cass_params) + # In python 3, the 'bytes' type is treated as a blob, so we can correctly encode it with hex notation. 
+ else: + s.execute(query, params) results = s.execute("SELECT * FROM blobstring")[0] for expected, actual in zip(params, results): From aa2be65810e0aa2675e760df83296a366fcc094b Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Thu, 16 Apr 2015 15:41:58 -0700 Subject: [PATCH 1343/3726] Remove operation timeouts for write timeout metrics test --- tests/integration/standard/test_metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 1996495c0b..7b19404907 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -86,7 +86,7 @@ def test_write_timeout(self): # Test write query = SimpleStatement("INSERT INTO test (k, v) VALUES (2, 2)", consistency_level=ConsistencyLevel.ALL) with self.assertRaises(WriteTimeout): - session.execute(query) + session.execute(query, timeout=None) self.assertEqual(1, cluster.metrics.stats.write_timeouts) finally: From b2514a898a05b93a9085ea7c7528e5a425b85c86 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Thu, 16 Apr 2015 17:53:28 -0700 Subject: [PATCH 1344/3726] Give extra time for auth setup on C* 1.2 tests --- tests/integration/standard/test_authentication.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index c698c986e2..3820ef7a90 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -40,7 +40,7 @@ def setup_module(): ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) # there seems to be some race, with some versions of C* taking longer to # get the auth (and default user) setup. 
Sleep here to give it a chance - time.sleep(2) + time.sleep(10) def teardown_module(): From 80e952b298cc26ff3199e320b7d97626eff1f5e9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 17 Apr 2015 10:22:37 -0500 Subject: [PATCH 1345/3726] Custom payloads for protocol v4 PYTHON-280 --- CHANGELOG.rst | 1 - cassandra/auth.py | 2 ++ cassandra/cluster.py | 42 ++++++++++++++++++++++++----- cassandra/protocol.py | 43 +++++++++++++++++++++++++++++- cassandra/query.py | 61 ++++++++++++++++++++++++++++++++++--------- 5 files changed, 128 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index af2d611309..8f91416846 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -78,7 +78,6 @@ Bug Fixes --------- * Make execute_concurrent compatible with Python 2.6 (PYTHON-159) * Handle Unauthorized message on schema_triggers query (PYTHON-155) -* Make execute_concurrent compatible with Python 2.6 (github-197) * Pure Python sorted set in support of UDTs nested in collections (PYTON-167) * Support CUSTOM index metadata and string export (PYTHON-165) diff --git a/cassandra/auth.py b/cassandra/auth.py index 67d302a9e8..508bd150ef 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -15,6 +15,7 @@ except ImportError: SASLClient = None + class AuthProvider(object): """ An abstract class that defines the interface that will be used for @@ -157,6 +158,7 @@ def __init__(self, **sasl_kwargs): def new_authenticator(self, host): return SaslAuthenticator(**self.sasl_kwargs) + class SaslAuthenticator(Authenticator): """ A pass-through :class:`~.Authenticator` using the third party package diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cb04152a5b..e84afd2ef1 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1361,7 +1361,7 @@ def __init__(self, cluster, hosts): for future in futures: future.result() - def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False): + def execute(self, query, parameters=None, timeout=_NOT_SET, 
trace=False, custom_payload=None): """ Execute the given query and synchronously wait for the response. @@ -1389,6 +1389,10 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False): instance and not just a string. If there is an error fetching the trace details, the :attr:`~.Statement.trace` attribute will be left as :const:`None`. + + `custom_payload` is a dict as described in TODO section. If `query` is a Statement + with its own custom_payload. the message will be a union of the two, + with the values specified here taking precedence. """ if timeout is _NOT_SET: timeout = self.default_timeout @@ -1398,7 +1402,7 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False): "The query argument must be an instance of a subclass of " "cassandra.query.Statement when trace=True") - future = self.execute_async(query, parameters, trace) + future = self.execute_async(query, parameters, trace, custom_payload) try: result = future.result(timeout) finally: @@ -1410,7 +1414,7 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False): return result - def execute_async(self, query, parameters=None, trace=False): + def execute_async(self, query, parameters=None, trace=False, custom_payload=None): """ Execute the given query and return a :class:`~.ResponseFuture` object which callbacks may be attached to for asynchronous response @@ -1422,6 +1426,13 @@ def execute_async(self, query, parameters=None, trace=False): :meth:`.ResponseFuture.get_query_trace()` after the request completes to retrieve a :class:`.QueryTrace` instance. + `custom_payload` is a dict as described in TODO section. If `query` is + a Statement with a custom_payload specified. the message will be a + union of the two, with the values specified here taking precedence. 
+ + If the server sends a custom payload in the response message, + the dict can be obtained via :attr:`.ResponseFuture.custom_payload` + Example usage:: >>> session = cluster.connect() @@ -1447,11 +1458,11 @@ def execute_async(self, query, parameters=None, trace=False): ... log.exception("Operation failed:") """ - future = self._create_response_future(query, parameters, trace) + future = self._create_response_future(query, parameters, trace, custom_payload) future.send_request() return future - def _create_response_future(self, query, parameters, trace): + def _create_response_future(self, query, parameters, trace, custom_payload): """ Returns the ResponseFuture before calling send_request() on it """ prepared_statement = None @@ -1501,13 +1512,16 @@ def _create_response_future(self, query, parameters, trace): if trace: message.tracing = True + message.update_custom_payload(query.custom_payload) + message.update_custom_payload(custom_payload) + return ResponseFuture( self, message, query, self.default_timeout, metrics=self._metrics, prepared_statement=prepared_statement) - def prepare(self, query): + def prepare(self, query, custom_payload=None): """ - Prepares a query string, returing a :class:`~cassandra.query.PreparedStatement` + Prepares a query string, returning a :class:`~cassandra.query.PreparedStatement` instance which can be used as follows:: >>> session = cluster.connect("mykeyspace") @@ -1530,8 +1544,12 @@ def prepare(self, query): **Important**: PreparedStatements should be prepared only once. Preparing the same query more than once will likely affect performance. + + `custom_payload` is a key value map to be passed along with the prepare + message. 
See TODO: refer to doc section """ message = PrepareMessage(query=query) + message.custom_payload = custom_payload future = ResponseFuture(self, message, query=None) try: future.send_request() @@ -1543,6 +1561,7 @@ def prepare(self, query): prepared_statement = PreparedStatement.from_message( query_id, column_metadata, pk_indexes, self.cluster.metadata, query, self.keyspace, self._protocol_version) + prepared_statement.custom_payload = future.custom_payload host = future._current_host try: @@ -2567,6 +2586,7 @@ class ResponseFuture(object): _start_time = None _metrics = None _paging_state = None + _custom_payload = None def __init__(self, session, message, query, default_timeout=None, metrics=None, prepared_statement=None): self.session = session @@ -2654,6 +2674,12 @@ def has_more_pages(self): """ return self._paging_state is not None + @property + def custom_payload(self): + if not self._event.is_set(): + raise Exception("custom_payload cannot be retrieved before ResponseFuture is finalized") + return self._custom_payload + def start_fetching_next_page(self): """ If there are more pages left in the query result, this asynchronously @@ -2690,6 +2716,8 @@ def _set_result(self, response): if trace_id: self._query_trace = QueryTrace(trace_id, self.session) + self._custom_payload = getattr(response, 'custom_payload', None) + if isinstance(response, ResultMessage): if response.kind == RESULT_KIND_SET_KEYSPACE: session = getattr(self, 'session', None) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index d81a650731..a48af3485c 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -54,6 +54,7 @@ class InternalError(Exception): COMPRESSED_FLAG = 0x01 TRACING_FLAG = 0x02 +CUSTOM_PAYLOAD_FLAG = 0x04 _message_types_by_name = {} _message_types_by_opcode = {} @@ -70,13 +71,19 @@ def __init__(cls, name, bases, dct): class _MessageType(object): tracing = False + custom_payload = None def to_binary(self, stream_id, protocol_version, compression=None): + 
flags = 0 body = io.BytesIO() + if self.custom_payload: + if protocol_version < 4: + raise UnsupportedOperation("Custom key/value payloads can only be used with protocol version 4 or higher") + flags |= CUSTOM_PAYLOAD_FLAG + write_bytesmap(body, self.custom_payload) self.send_body(body, protocol_version) body = body.getvalue() - flags = 0 if compression and len(body) > 0: body = compression(body) flags |= COMPRESSED_FLAG @@ -89,6 +96,12 @@ def to_binary(self, stream_id, protocol_version, compression=None): return msg.getvalue() + def update_custom_payload(self, other): + if other: + if not self.custom_payload: + self.custom_payload = {} + self.custom_payload.update(other) + def __repr__(self): return '<%s(%s)>' % (self.__class__.__name__, ', '.join('%s=%r' % i for i in _get_params(self))) @@ -116,6 +129,12 @@ def decode_response(protocol_version, user_type_map, stream_id, flags, opcode, b else: trace_id = None + if flags & CUSTOM_PAYLOAD_FLAG: + custom_payload = read_bytesmap(body) + flags ^= CUSTOM_PAYLOAD_FLAG + else: + custom_payload = None + if flags: log.warning("Unknown protocol flags set: %02x. 
May cause problems.", flags) @@ -123,6 +142,7 @@ def decode_response(protocol_version, user_type_map, stream_id, flags, opcode, b msg = msg_class.recv_body(body, protocol_version, user_type_map) msg.stream_id = stream_id msg.trace_id = trace_id + msg.custom_payload = custom_payload return msg @@ -918,6 +938,11 @@ def read_binary_string(f): return contents +def write_binary_string(f, s): + write_short(f, len(s)) + f.write(s) + + def write_string(f, s): if isinstance(s, six.text_type): s = s.encode('utf8') @@ -969,6 +994,22 @@ def write_stringmap(f, strmap): write_string(f, v) +def read_bytesmap(f): + numpairs = read_short(f) + bytesmap = {} + for _ in range(numpairs): + k = read_string(f) + bytesmap[k] = read_binary_string(f) + return bytesmap + + +def write_bytesmap(f, bytesmap): + write_short(f, len(bytesmap)) + for k, v in bytesmap.items(): + write_string(f, k) + write_binary_string(f, v) + + def read_stringmultimap(f): numkeys = read_short(f) strmmap = {} diff --git a/cassandra/query.py b/cassandra/query.py index 031a1684e5..70d16e9e40 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -197,11 +197,25 @@ class Statement(object): .. versionadded:: 2.1.3 """ + custom_payload = None + """ + TODO: refer to custom proto doc section + A string:binary_type dict holding custom key/value pairs to be passed + in the frame to a custom QueryHandler on the server side. + + By default these values are ignored by the server. + + These are only allowed when using protocol version 4 or higher. + + .. 
versionadded:: 3.0.0 + """ + _serial_consistency_level = None _routing_key = None def __init__(self, retry_policy=None, consistency_level=None, routing_key=None, - serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET, keyspace=None): + serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET, keyspace=None, + custom_payload=None): self.retry_policy = retry_policy if consistency_level is not None: self.consistency_level = consistency_level @@ -212,6 +226,8 @@ def __init__(self, retry_policy=None, consistency_level=None, routing_key=None, self.fetch_size = fetch_size if keyspace is not None: self.keyspace = keyspace + if custom_payload is not None: + self.custom_payload = custom_payload def _get_routing_key(self): return self._routing_key @@ -290,8 +306,7 @@ def _del_serial_consistency_level(self): class SimpleStatement(Statement): """ - A simple, un-prepared query. All attributes of :class:`Statement` apply - to this class as well. + A simple, un-prepared query. """ def __init__(self, query_string, *args, **kwargs): @@ -299,6 +314,8 @@ def __init__(self, query_string, *args, **kwargs): `query_string` should be a literal CQL statement with the exception of parameter placeholders that will be filled through the `parameters` argument of :meth:`.Session.execute()`. + + All arguments to :class:`Statement` apply to this class as well """ Statement.__init__(self, *args, **kwargs) self._query_string = query_string @@ -338,6 +355,8 @@ class PreparedStatement(object): fetch_size = FETCH_SIZE_UNSET + custom_payload = None + def __init__(self, column_metadata, query_id, routing_key_indexes, query, keyspace, protocol_version): self.column_metadata = column_metadata @@ -397,8 +416,6 @@ class BoundStatement(Statement): """ A prepared statement that has been bound to a particular set of values. These may be created directly or through :meth:`.PreparedStatement.bind()`. - - All attributes of :class:`Statement` apply to this class as well. 
""" prepared_statement = None @@ -414,13 +431,15 @@ class BoundStatement(Statement): def __init__(self, prepared_statement, *args, **kwargs): """ `prepared_statement` should be an instance of :class:`PreparedStatement`. - All other ``*args`` and ``**kwargs`` will be passed to :class:`.Statement`. + + All arguments to :class:`Statement` apply to this class as well """ self.prepared_statement = prepared_statement self.consistency_level = prepared_statement.consistency_level self.serial_consistency_level = prepared_statement.serial_consistency_level self.fetch_size = prepared_statement.fetch_size + self.custom_payload = prepared_statement.custom_payload self.values = [] meta = prepared_statement.column_metadata @@ -601,7 +620,8 @@ class BatchStatement(Statement): _session = None def __init__(self, batch_type=BatchType.LOGGED, retry_policy=None, - consistency_level=None, serial_consistency_level=None, session=None): + consistency_level=None, serial_consistency_level=None, + session=None, custom_payload=None): """ `batch_type` specifies The :class:`.BatchType` for the batch operation. Defaults to :attr:`.BatchType.LOGGED`. @@ -612,6 +632,10 @@ def __init__(self, batch_type=BatchType.LOGGED, retry_policy=None, `consistency_level` should be a :class:`~.ConsistencyLevel` value to be used for all operations in the batch. + `custom_payload` is a key-value map TODO: refer to doc section + Note: as Statement objects are added to the batch, this map is + updated with values from their custom payloads. + Example usage: .. code-block:: python @@ -637,12 +661,15 @@ def __init__(self, batch_type=BatchType.LOGGED, retry_policy=None, .. versionchanged:: 2.1.0 Added `serial_consistency_level` as a parameter + + .. 
versionchanged:: 3.0.0 + Added `custom_payload` as a parameter """ self.batch_type = batch_type self._statements_and_parameters = [] self._session = session Statement.__init__(self, retry_policy=retry_policy, consistency_level=consistency_level, - serial_consistency_level=serial_consistency_level) + serial_consistency_level=serial_consistency_level, custom_payload=custom_payload) def add(self, statement, parameters=None): """ @@ -660,7 +687,7 @@ def add(self, statement, parameters=None): elif isinstance(statement, PreparedStatement): query_id = statement.query_id bound_statement = statement.bind(() if parameters is None else parameters) - self._maybe_set_routing_attributes(bound_statement) + self._update_state(bound_statement) self._statements_and_parameters.append( (True, query_id, bound_statement.values)) elif isinstance(statement, BoundStatement): @@ -668,7 +695,7 @@ def add(self, statement, parameters=None): raise ValueError( "Parameters cannot be passed with a BoundStatement " "to BatchStatement.add()") - self._maybe_set_routing_attributes(statement) + self._update_state(statement) self._statements_and_parameters.append( (True, statement.prepared_statement.query_id, statement.values)) else: @@ -677,7 +704,7 @@ def add(self, statement, parameters=None): if parameters: encoder = Encoder() if self._session is None else self._session.encoder query_string = bind_params(query_string, parameters, encoder) - self._maybe_set_routing_attributes(statement) + self._update_state(statement) self._statements_and_parameters.append((False, query_string, ())) return self @@ -696,6 +723,16 @@ def _maybe_set_routing_attributes(self, statement): self.routing_key = statement.routing_key self.keyspace = statement.keyspace + def _update_custom_payload(self, statement): + if statement.custom_payload: + if self.custom_payload is None: + self.custom_payload = {} + self.custom_payload.update(statement.custom_payload) + + def _update_state(self, statement): + 
self._maybe_set_routing_attributes(statement) + self._update_custom_payload(statement) + def __str__(self): consistency = ConsistencyLevel.value_to_name.get(self.consistency_level, 'Not Set') return (u'' % @@ -836,7 +873,7 @@ def populate(self, max_wait=2.0): def _execute(self, query, parameters, time_spent, max_wait): # in case the user switched the row factory, set it to namedtuple for this query - future = self._session._create_response_future(query, parameters, trace=False) + future = self._session._create_response_future(query, parameters, trace=False, custom_payload=None) future.row_factory = named_tuple_factory future.send_request() From 21ad7ad679d1e6be3e2cd0701881e25ae74af505 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Fri, 17 Apr 2015 14:34:21 -0700 Subject: [PATCH 1346/3726] Fixed IPv6 tests, and streamlined cluster removal --- tests/integration/__init__.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index ce3f042bf6..4b3c538145 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -26,6 +26,7 @@ import os from threading import Event import six +from subprocess import call from itertools import groupby @@ -201,11 +202,13 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True): if start: log.debug("Starting ccm %s cluster", cluster_name) cluster.start(wait_for_binary_proto=True, wait_other_notice=True) - setup_test_keyspace() + setup_test_keyspace(ipformat=ipformat) CCM_CLUSTER = cluster except Exception: - log.exception("Failed to start ccm cluster:") + log.exception("Failed to start ccm cluster. 
Removing cluster.") + remove_cluster() + call(["pkill", "-9", "-f", ".ccm"]) raise @@ -228,11 +231,14 @@ def teardown_package(): log.warn('Did not find cluster: %s' % cluster_name) -def setup_test_keyspace(): +def setup_test_keyspace(ipformat=None): # wait for nodes to startup time.sleep(10) - cluster = Cluster(protocol_version=PROTOCOL_VERSION) + if not ipformat: + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + else: + cluster = Cluster(contact_points=["::1"], protocol_version=PROTOCOL_VERSION) session = cluster.connect() try: From f4191903b7e85e82849c20401d5e776bf45d2a9e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 17 Apr 2015 16:51:52 -0500 Subject: [PATCH 1347/3726] Docs for custom_payload --- cassandra/cluster.py | 17 +++++++++-------- cassandra/query.py | 11 ++++------- docs/api/index.rst | 1 + 3 files changed, 14 insertions(+), 15 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index e84afd2ef1..28885ae9aa 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1390,9 +1390,9 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_ trace details, the :attr:`~.Statement.trace` attribute will be left as :const:`None`. - `custom_payload` is a dict as described in TODO section. If `query` is a Statement - with its own custom_payload. the message will be a union of the two, - with the values specified here taking precedence. + `custom_payload` is a :ref:`custom_payload` dict to be passed to the server. + If `query` is a Statement with its own custom_payload. The message payload + will be a union of the two, with the values specified here taking precedence. """ if timeout is _NOT_SET: timeout = self.default_timeout @@ -1426,12 +1426,13 @@ def execute_async(self, query, parameters=None, trace=False, custom_payload=None :meth:`.ResponseFuture.get_query_trace()` after the request completes to retrieve a :class:`.QueryTrace` instance. - `custom_payload` is a dict as described in TODO section. 
If `query` is - a Statement with a custom_payload specified. the message will be a - union of the two, with the values specified here taking precedence. + `custom_payload` is a :ref:`custom_payload` dict to be passed to the server. + If `query` is a Statement with its own custom_payload. The message payload + will be a union of the two, with the values specified here taking precedence. If the server sends a custom payload in the response message, - the dict can be obtained via :attr:`.ResponseFuture.custom_payload` + the dict can be obtained following :meth:`.ResponseFuture.result` via + :attr:`.ResponseFuture.custom_payload` Example usage:: @@ -1546,7 +1547,7 @@ def prepare(self, query, custom_payload=None): Preparing the same query more than once will likely affect performance. `custom_payload` is a key value map to be passed along with the prepare - message. See TODO: refer to doc section + message. See :ref:`custom_payload`. """ message = PrepareMessage(query=query) message.custom_payload = custom_payload diff --git a/cassandra/query.py b/cassandra/query.py index 70d16e9e40..722e0d59c1 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -199,11 +199,7 @@ class Statement(object): custom_payload = None """ - TODO: refer to custom proto doc section - A string:binary_type dict holding custom key/value pairs to be passed - in the frame to a custom QueryHandler on the server side. - - By default these values are ignored by the server. + :ref:`custom_payload` to be passed to the server. These are only allowed when using protocol version 4 or higher. @@ -632,9 +628,10 @@ def __init__(self, batch_type=BatchType.LOGGED, retry_policy=None, `consistency_level` should be a :class:`~.ConsistencyLevel` value to be used for all operations in the batch. - `custom_payload` is a key-value map TODO: refer to doc section + `custom_payload` is a :ref:`custom_payload` passed to the server. 
Note: as Statement objects are added to the batch, this map is - updated with values from their custom payloads. + updated with any values found in their custom payloads. These are + only allowed when using protocol version 4 or higher. Example usage: diff --git a/docs/api/index.rst b/docs/api/index.rst index e0fe9810d2..340a5e0235 100644 --- a/docs/api/index.rst +++ b/docs/api/index.rst @@ -14,6 +14,7 @@ Core Driver cassandra/metrics cassandra/query cassandra/pool + cassandra/protocol cassandra/encoder cassandra/decoder cassandra/concurrent From 567f9b0216f51c9e80ea7d8ec5af96ba6acf1850 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 20 Apr 2015 10:08:20 -0500 Subject: [PATCH 1348/3726] Fix variable rename bug Peer reveiw input --- cassandra/metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index b6e0aee7b3..a8e3ba9241 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -161,7 +161,7 @@ def rebuild_schema(self, ks_results, type_results, function_results, # remove not-just-added keyspaces removed_keyspaces = [name for name in self.keyspaces.keys() - if ksname not in current_keyspaces] + if name not in current_keyspaces] self.keyspaces = dict((name, meta) for name, meta in self.keyspaces.items() if name in current_keyspaces) for ksname in removed_keyspaces: From 2c062213fde00a0c30b3dcbddd2e4dea6d7c210e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 20 Apr 2015 14:22:09 -0500 Subject: [PATCH 1349/3726] Add the missing protocol doc file --- docs/api/cassandra/protocol.rst | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 docs/api/cassandra/protocol.rst diff --git a/docs/api/cassandra/protocol.rst b/docs/api/cassandra/protocol.rst new file mode 100644 index 0000000000..a6b4d1c41a --- /dev/null +++ b/docs/api/cassandra/protocol.rst @@ -0,0 +1,11 @@ +.. 
_custom_payload: + +Custom Payload +============== +Native protocol version 4+ allows for a custom payload to be sent between clients +and custom query handlers. The payload is specified as a string:binary_type dict +holding custom key/value pairs. + +By default these are ignored by the server. They can be useful for servers implementing +a custom QueryHandler. + From 35c6ce5365345571832028d5bdb97efd1a4a0435 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 20 Apr 2015 15:58:44 -0500 Subject: [PATCH 1350/3726] Refactor redundant Connection factory methods to base --- cassandra/connection.py | 18 ++++++++++++++++++ cassandra/io/asyncorereactor.py | 13 ------------- cassandra/io/eventletreactor.py | 13 ------------- cassandra/io/geventreactor.py | 13 ------------- cassandra/io/libevreactor.py | 13 ------------- cassandra/io/twistedreactor.py | 18 ------------------ 6 files changed, 18 insertions(+), 70 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 2a020c0615..6cbd434650 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -239,6 +239,24 @@ def handle_fork(self): """ pass + @classmethod + def factory(cls, *args, **kwargs): + """ + A factory function which returns connections which have + succeeded in connecting and are ready for service (or + raises an exception otherwise). 
+ """ + timeout = kwargs.pop('timeout', 5.0) + conn = cls(*args, **kwargs) + conn.connected_event.wait(timeout) + if conn.last_error: + raise conn.last_error + elif not conn.connected_event.is_set(): + conn.close() + raise OperationTimedOut("Timed out creating connection") + else: + return conn + def close(self): raise NotImplementedError() diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 2ac7156d61..ef687c388c 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -156,19 +156,6 @@ def handle_fork(cls): cls._loop._cleanup() cls._loop = None - @classmethod - def factory(cls, *args, **kwargs): - timeout = kwargs.pop('timeout', 5.0) - conn = cls(*args, **kwargs) - conn.connected_event.wait(timeout) - if conn.last_error: - raise conn.last_error - elif not conn.connected_event.is_set(): - conn.close() - raise OperationTimedOut("Timed out creating connection") - else: - return conn - def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) asyncore.dispatcher.__init__(self) diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index ceac6a951e..670d0f1865 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -57,19 +57,6 @@ class EventletConnection(Connection): def initialize_reactor(cls): eventlet.monkey_patch() - @classmethod - def factory(cls, *args, **kwargs): - timeout = kwargs.pop('timeout', 5.0) - conn = cls(*args, **kwargs) - conn.connected_event.wait(timeout) - if conn.last_error: - raise conn.last_error - elif not conn.connected_event.is_set(): - conn.close() - raise OperationTimedOut("Timed out creating connection") - else: - return conn - def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 4cd9c68109..6e9af0da4d 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -50,19 +50,6 
@@ class GeventConnection(Connection): _write_watcher = None _socket = None - @classmethod - def factory(cls, *args, **kwargs): - timeout = kwargs.pop('timeout', 5.0) - conn = cls(*args, **kwargs) - conn.connected_event.wait(timeout) - if conn.last_error: - raise conn.last_error - elif not conn.connected_event.is_set(): - conn.close() - raise OperationTimedOut("Timed out creating connection") - else: - return conn - def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index db11eaf8be..93b4c97854 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -236,19 +236,6 @@ def handle_fork(cls): cls._libevloop._cleanup() cls._libevloop = None - @classmethod - def factory(cls, *args, **kwargs): - timeout = kwargs.pop('timeout', 5.0) - conn = cls(*args, **kwargs) - conn.connected_event.wait(timeout) - if conn.last_error: - raise conn.last_error - elif not conn.connected_event.is_set(): - conn.close() - raise OperationTimedOut("Timed out creating new connection") - else: - return conn - def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index 1a5a64e796..ff81e5613f 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -148,24 +148,6 @@ def initialize_reactor(cls): if not cls._loop: cls._loop = TwistedLoop() - @classmethod - def factory(cls, *args, **kwargs): - """ - A factory function which returns connections which have - succeeded in connecting and are ready for service (or - raises an exception otherwise). 
- """ - timeout = kwargs.pop('timeout', 5.0) - conn = cls(*args, **kwargs) - conn.connected_event.wait(timeout) - if conn.last_error: - raise conn.last_error - elif not conn.connected_event.is_set(): - conn.close() - raise OperationTimedOut("Timed out creating connection") - else: - return conn - def __init__(self, *args, **kwargs): """ Initialization method. From 833feeb9a06c00fb5afb1f80d6880799a5ec451f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 20 Apr 2015 15:31:48 -0500 Subject: [PATCH 1351/3726] Deprecate cqlengine.Float(double_precision=True) Overload deprecated in favor of distinct Float, Double types PYTHON-246 --- cassandra/cqlengine/columns.py | 38 ++++++++++----- docs/api/cassandra/cqlengine/columns.rst | 2 + .../cqlengine/columns/test_value_io.py | 47 +++++++++++++++---- .../integration/cqlengine/model/test_udts.py | 4 +- 4 files changed, 69 insertions(+), 22 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index ec6a468f60..ae17779590 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -591,18 +591,9 @@ def to_python(self, value): return self.validate(value) -class Float(Column): - """ - Stores a floating point value - """ - db_type = 'double' - - def __init__(self, double_precision=True, **kwargs): - self.db_type = 'double' if double_precision else 'float' - super(Float, self).__init__(**kwargs) - +class BaseFloat(Column): def validate(self, value): - value = super(Float, self).validate(value) + value = super(BaseFloat, self).validate(value) if value is None: return try: @@ -617,6 +608,31 @@ def to_database(self, value): return self.validate(value) +class Float(BaseFloat): + """ + Stores a single-precision floating-point value + """ + db_type = 'float' + + def __init__(self, double_precision=None, **kwargs): + if double_precision is None or bool(double_precision): + msg = "Float(double_precision=True) is deprecated. Use Double() type instead." 
+ double_precision = True + warnings.warn(msg, DeprecationWarning) + log.warning(msg) + + self.db_type = 'double' if double_precision else 'float' + + super(Float, self).__init__(**kwargs) + + +class Double(BaseFloat): + """ + Stores a double-precision floating-point value + """ + db_type = 'double' + + class Decimal(Column): """ Stores a variable precision decimal value diff --git a/docs/api/cassandra/cqlengine/columns.rst b/docs/api/cassandra/cqlengine/columns.rst index 425e01e03a..a214d2cd94 100644 --- a/docs/api/cassandra/cqlengine/columns.rst +++ b/docs/api/cassandra/cqlengine/columns.rst @@ -58,6 +58,8 @@ Columns of all types are initialized by passing :class:`.Column` attributes to t .. autoclass:: Decimal(**kwargs) +.. autoclass:: Double + .. autoclass:: Float .. autoclass:: Integer(**kwargs) diff --git a/tests/integration/cqlengine/columns/test_value_io.py b/tests/integration/cqlengine/columns/test_value_io.py index 2ebfa89b34..04be247a84 100644 --- a/tests/integration/cqlengine/columns/test_value_io.py +++ b/tests/integration/cqlengine/columns/test_value_io.py @@ -50,8 +50,9 @@ class BaseColumnIOTest(BaseCassEngTestCase): def setUpClass(cls): super(BaseColumnIOTest, cls).setUpClass() - #if the test column hasn't been defined, bail out - if not cls.column: return + # if the test column hasn't been defined, bail out + if not cls.column: + return # create a table with the given column class IOTestModel(Model): @@ -62,7 +63,7 @@ class IOTestModel(Model): cls._generated_model = IOTestModel sync_table(cls._generated_model) - #tupleify the tested values + # tupleify the tested values if not isinstance(cls.pkey_val, tuple): cls.pkey_val = cls.pkey_val, if not isinstance(cls.data_val, tuple): @@ -71,7 +72,8 @@ class IOTestModel(Model): @classmethod def tearDownClass(cls): super(BaseColumnIOTest, cls).tearDownClass() - if not cls.column: return + if not cls.column: + return drop_table(cls._generated_model) def comparator_converter(self, val): @@ -80,31 +82,35 @@ 
def comparator_converter(self, val): def test_column_io(self): """ Tests the given models class creates and retrieves values as expected """ - if not self.column: return + if not self.column: + return for pkey, data in zip(self.pkey_val, self.data_val): - #create + # create m1 = self._generated_model.create(pkey=pkey, data=data) - #get + # get m2 = self._generated_model.get(pkey=pkey) assert m1.pkey == m2.pkey == self.comparator_converter(pkey), self.column assert m1.data == m2.data == self.comparator_converter(data), self.column - #delete + # delete self._generated_model.filter(pkey=pkey).delete() + class TestBlobIO(BaseColumnIOTest): column = columns.Blob pkey_val = six.b('blake'), uuid4().bytes data_val = six.b('eggleston'), uuid4().bytes + class TestBlobIO2(BaseColumnIOTest): column = columns.Blob pkey_val = bytearray(six.b('blake')), uuid4().bytes data_val = bytearray(six.b('eggleston')), uuid4().bytes + class TestTextIO(BaseColumnIOTest): column = columns.Text @@ -118,18 +124,21 @@ class TestNonBinaryTextIO(BaseColumnIOTest): pkey_val = 'bacon' data_val = '0xmonkey' + class TestInteger(BaseColumnIOTest): column = columns.Integer pkey_val = 5 data_val = 6 + class TestBigInt(BaseColumnIOTest): column = columns.BigInt pkey_val = 6 data_val = pow(2, 63) - 1 + class TestDateTime(BaseColumnIOTest): column = columns.DateTime @@ -138,6 +147,7 @@ class TestDateTime(BaseColumnIOTest): pkey_val = now data_val = now + timedelta(days=1) + class TestDate(BaseColumnIOTest): column = columns.Date @@ -146,6 +156,7 @@ class TestDate(BaseColumnIOTest): pkey_val = now data_val = now + timedelta(days=1) + class TestUUID(BaseColumnIOTest): column = columns.UUID @@ -156,6 +167,7 @@ class TestUUID(BaseColumnIOTest): def comparator_converter(self, val): return val if isinstance(val, UUID) else UUID(val) + class TestTimeUUID(BaseColumnIOTest): column = columns.TimeUUID @@ -166,13 +178,29 @@ class TestTimeUUID(BaseColumnIOTest): def comparator_converter(self, val): return val if 
isinstance(val, UUID) else UUID(val) + +# until Floats are implicitly single: +class FloatSingle(columns.Float): + def __init__(self, **kwargs): + super(FloatSingle, self).__init__(double_precision=False, **kwargs) + + class TestFloatIO(BaseColumnIOTest): - column = columns.Float + column = FloatSingle + + pkey_val = 4.75 + data_val = -1.5 + + +class TestDoubleIO(BaseColumnIOTest): + + column = columns.Double pkey_val = 3.14 data_val = -1982.11 + class TestDecimalIO(BaseColumnIOTest): column = columns.Decimal @@ -183,6 +211,7 @@ class TestDecimalIO(BaseColumnIOTest): def comparator_converter(self, val): return Decimal(val) + class TestQuoter(unittest.TestCase): def test_equals(self): diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index ab9386b8b4..89a55d1730 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -196,7 +196,7 @@ class AllDatatypes(UserType): e = columns.Date() f = columns.DateTime() g = columns.Decimal() - h = columns.Float() + h = columns.Float(double_precision=False) i = columns.Inet() j = columns.Integer() k = columns.Text() @@ -227,7 +227,7 @@ class AllDatatypes(UserType): e = columns.Date() f = columns.DateTime() g = columns.Decimal() - h = columns.Float(double_precision=True) + h = columns.Double() i = columns.Inet() j = columns.Integer() k = columns.Text() From 67eaa36776018e453042ce81f9c3645235fd9ca8 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Mon, 20 Apr 2015 16:41:47 -0700 Subject: [PATCH 1352/3726] Catch OperationTimedOut exception, removed sleeps --- tests/integration/long/test_consistency.py | 28 +++++++++++-------- .../long/test_loadbalancingpolicies.py | 26 +++++++++++++---- tests/integration/long/utils.py | 10 ++----- 3 files changed, 40 insertions(+), 24 deletions(-) diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index de3b9fd92b..bf0cf7b937 
100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -12,25 +12,24 @@ # See the License for the specific language governing permissions and # limitations under the License. -import struct -import traceback +import struct, logging, sys, traceback -import cassandra -from cassandra import ConsistencyLevel +from cassandra import ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, Unavailable from cassandra.cluster import Cluster -from cassandra.policies import TokenAwarePolicy, RoundRobinPolicy, \ - DowngradingConsistencyRetryPolicy +from cassandra.policies import TokenAwarePolicy, RoundRobinPolicy, DowngradingConsistencyRetryPolicy from cassandra.query import SimpleStatement from tests.integration import use_singledc, PROTOCOL_VERSION -from tests.integration.long.utils import force_stop, create_schema, \ - wait_for_down, wait_for_up, start, CoordinatorStats +from tests.integration.long.utils import (force_stop, create_schema, wait_for_down, wait_for_up, + start, CoordinatorStats) try: import unittest2 as unittest except ImportError: import unittest # noqa +log = logging.getLogger(__name__) + ALL_CONSISTENCY_LEVELS = set([ ConsistencyLevel.ANY, ConsistencyLevel.ONE, ConsistencyLevel.TWO, ConsistencyLevel.QUORUM, ConsistencyLevel.THREE, @@ -74,7 +73,14 @@ def _query(self, session, keyspace, count, consistency_level=ConsistencyLevel.ON ss = SimpleStatement('SELECT * FROM cf WHERE k = 0', consistency_level=consistency_level, routing_key=routing_key) - self.coordinator_stats.add_coordinator(session.execute_async(ss)) + while True: + try: + self.coordinator_stats.add_coordinator(session.execute_async(ss)) + break + except (OperationTimedOut, ReadTimeout): + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb def _assert_writes_succeed(self, session, keyspace, consistency_levels): for cl in consistency_levels: @@ -103,7 +109,7 
@@ def _assert_writes_fail(self, session, keyspace, consistency_levels): try: self._insert(session, keyspace, 1, cl) self._cl_expected_failure(cl) - except (cassandra.Unavailable, cassandra.WriteTimeout): + except (Unavailable, WriteTimeout): pass def _assert_reads_fail(self, session, keyspace, consistency_levels): @@ -112,7 +118,7 @@ def _assert_reads_fail(self, session, keyspace, consistency_levels): try: self._query(session, keyspace, 1, cl) self._cl_expected_failure(cl) - except (cassandra.Unavailable, cassandra.ReadTimeout): + except (Unavailable, ReadTimeout): pass def _test_tokenaware_one_node_down(self, keyspace, rf, accepted): diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 6a9cb74c8e..cf5f3a191b 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import struct -import time -from cassandra import ConsistencyLevel, Unavailable +import struct, time, logging, sys, traceback + +from cassandra import ConsistencyLevel, Unavailable, OperationTimedOut, ReadTimeout from cassandra.cluster import Cluster, NoHostAvailable from cassandra.concurrent import execute_concurrent_with_args from cassandra.policies import (RoundRobinPolicy, DCAwareRoundRobinPolicy, @@ -32,6 +32,8 @@ except ImportError: import unittest # noqa +log = logging.getLogger(__name__) + class LoadBalancingPolicyTests(unittest.TestCase): @@ -59,14 +61,28 @@ def _query(self, session, keyspace, count=12, self.prepared = session.prepare(query_string) for i in range(count): - self.coordinator_stats.add_coordinator(session.execute_async(self.prepared.bind((0,)))) + while True: + try: + self.coordinator_stats.add_coordinator(session.execute_async(self.prepared.bind((0,)))) + break + except (OperationTimedOut, ReadTimeout): + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb else: routing_key = struct.pack('>i', 0) for i in range(count): ss = SimpleStatement('SELECT * FROM %s.cf WHERE k = 0' % keyspace, consistency_level=consistency_level, routing_key=routing_key) - self.coordinator_stats.add_coordinator(session.execute_async(ss)) + while True: + try: + self.coordinator_stats.add_coordinator(session.execute_async(ss)) + break + except (OperationTimedOut, ReadTimeout): + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb def test_roundrobin(self): use_singledc() diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index f5422ef233..bcdc24ac0a 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -135,12 +135,10 @@ def wait_for_up(cluster, node, wait=True): host = cluster.metadata.get_host(IP_FORMAT % node) time.sleep(0.1) if host and 
host.is_up: - # BUG: shouldn't have to, but we do - if wait: - log.debug("Sleeping 30s until host is up") - time.sleep(30) log.debug("Done waiting for node %s to be up", node) return + else: + log.debug("Host is still marked down, waiting") def wait_for_down(cluster, node, wait=True): @@ -149,10 +147,6 @@ def wait_for_down(cluster, node, wait=True): host = cluster.metadata.get_host(IP_FORMAT % node) time.sleep(0.1) if not host or not host.is_up: - # BUG: shouldn't have to, but we do - if wait: - log.debug("Sleeping 10s until host is down") - time.sleep(10) log.debug("Done waiting for node %s to be down", node) return else: From 290334be53dd660083878871c97ad313915969ee Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Mon, 20 Apr 2015 17:05:51 -0700 Subject: [PATCH 1353/3726] typo fix to enable TestCodeCoverage --- tests/integration/standard/test_metadata.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 0afc15b559..0e218cff09 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -362,7 +362,7 @@ def test_export_keyspace_schema_udts(self): "Protocol 3.0+ is required for UDT change events, currently testing against %r" % (PROTOCOL_VERSION,)) - if sys.version_info[2:] != (2, 7): + if sys.version_info[0:2] != (2, 7): raise unittest.SkipTest('This test compares static strings generated from dict items, which may change orders. Test with 2.7.') cluster = Cluster(protocol_version=PROTOCOL_VERSION) @@ -559,7 +559,7 @@ def test_legacy_tables(self): if get_server_versions()[0] < (2, 1, 0): raise unittest.SkipTest('Test schema output assumes 2.1.0+ options') - if sys.version_info[2:] != (2, 7): + if sys.version_info[0:2] != (2, 7): raise unittest.SkipTest('This test compares static strings generated from dict items, which may change orders. 
Test with 2.7.') cli_script = """CREATE KEYSPACE legacy From b0f130b1c903cb600afd09ff994f9a9db4f01a4d Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Mon, 20 Apr 2015 17:24:24 -0700 Subject: [PATCH 1354/3726] add twisted as a test dependency --- test-requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/test-requirements.txt b/test-requirements.txt index 8fc8dad04a..a90f1ad5c5 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -9,3 +9,4 @@ PyYAML pytz sure pure-sasl +twisted From 86436847aadabd3efd09d863ff76df8438876a83 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Mon, 20 Apr 2015 18:36:09 -0700 Subject: [PATCH 1355/3726] catch OperationTimedOut in consistency tests --- tests/integration/long/test_consistency.py | 13 +++++++++++-- tests/integration/long/utils.py | 4 ++-- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index bf0cf7b937..db752cc4bd 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import struct, logging, sys, traceback +import struct, logging, sys, traceback, time from cassandra import ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, Unavailable from cassandra.cluster import Cluster @@ -65,7 +65,15 @@ def _insert(self, session, keyspace, count, consistency_level=ConsistencyLevel.O for i in range(count): ss = SimpleStatement('INSERT INTO cf(k, i) VALUES (0, 0)', consistency_level=consistency_level) - session.execute(ss) + while True: + try: + session.execute(ss) + break + except (OperationTimedOut, WriteTimeout): + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + time.sleep(1) def _query(self, session, keyspace, count, consistency_level=ConsistencyLevel.ONE): routing_key = struct.pack('>i', 0) @@ -81,6 +89,7 @@ def _query(self, session, keyspace, count, consistency_level=ConsistencyLevel.ON ex_type, ex, tb = sys.exc_info() log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb + time.sleep(1) def _assert_writes_succeed(self, session, keyspace, consistency_levels): for cl in consistency_levels: diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index bcdc24ac0a..e0e1e530d7 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -133,21 +133,21 @@ def ring(node): def wait_for_up(cluster, node, wait=True): while True: host = cluster.metadata.get_host(IP_FORMAT % node) - time.sleep(0.1) if host and host.is_up: log.debug("Done waiting for node %s to be up", node) return else: log.debug("Host is still marked down, waiting") + time.sleep(1) def wait_for_down(cluster, node, wait=True): log.debug("Waiting for node %s to be down", node) while True: host = cluster.metadata.get_host(IP_FORMAT % node) - time.sleep(0.1) if not host or not host.is_up: log.debug("Done waiting for node %s to be down", node) return else: log.debug("Host is still marked up, 
waiting") + time.sleep(1) From 7b15fae5c80093ae479c3a04aaa8db2da3d45f1d Mon Sep 17 00:00:00 2001 From: Stefania Alborghetti Date: Tue, 21 Apr 2015 09:46:52 +0800 Subject: [PATCH 1356/3726] CASSANDRA-7814: code review comments --- cassandra/metadata.py | 38 +++++++++++++++++--------------------- 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 9b01a18c8e..8a11a64596 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -122,9 +122,8 @@ def rebuild_schema(self, ks_results, type_results, cf_results, col_results, trig keyspace_col_rows = col_def_rows.get(keyspace_meta.name, {}) keyspace_trigger_rows = trigger_rows.get(keyspace_meta.name, {}) for table_row in cf_def_rows.get(keyspace_meta.name, []): - self._add_table_metadata_to_ks( - keyspace_meta, table_row, keyspace_col_rows, - keyspace_trigger_rows) + table_meta = self._build_table_metadata(keyspace_meta, table_row, keyspace_col_rows, keyspace_trigger_rows) + keyspace_meta._add_table_metadata(table_meta) for usertype_row in usertype_rows.get(keyspace_meta.name, []): usertype = self._build_usertype(keyspace_meta.name, usertype_row) @@ -185,12 +184,11 @@ def table_changed(self, keyspace, table, cf_results, col_results, triggers_resul # the table was removed table_meta = keyspace_meta.tables.pop(table, None) if table_meta: - self._clear_table_indexes_in_ks(keyspace_meta, table_meta.name) + keyspace_meta._clear_table_indexes(table_meta.name) else: assert len(cf_results) == 1 - self._add_table_metadata_to_ks( - keyspace_meta, cf_results[0], {table: col_results}, - {table: triggers_result}) + table_meta = self._build_table_metadata(keyspace_meta, cf_results[0], {table: col_results}, {table: triggers_result}) + keyspace_meta._add_table_metadata(ctable_meta) def _keyspace_added(self, ksname): if self.token_map: @@ -216,20 +214,6 @@ def _build_usertype(self, keyspace, usertype_row): return UserType(usertype_row['keyspace_name'], 
usertype_row['type_name'], usertype_row['field_names'], type_classes) - def _add_table_metadata_to_ks(self, keyspace_metadata, row, col_rows, trigger_rows): - self._clear_table_indexes_in_ks(keyspace_metadata, row["columnfamily_name"]) - - table_metadata = self._build_table_metadata(keyspace_metadata, row, col_rows, trigger_rows) - keyspace_metadata.tables[table_metadata.name] = table_metadata - for index_name, index_metadata in table_metadata.indexes.iteritems(): - keyspace_metadata.indexes[index_name] = index_metadata - - def _clear_table_indexes_in_ks(self, keyspace_metadata, table_name): - if table_name in keyspace_metadata.tables: - table_meta = keyspace_metadata.tables[table_name] - for index_name in table_meta.indexes: - keyspace_metadata.indexes.pop(index_name, None) - def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): cfname = row["columnfamily_name"] cf_col_rows = col_rows.get(cfname, []) @@ -803,6 +787,18 @@ def resolve_user_types(self, key, types, user_type_strings): self.resolve_user_types(field_type.typename, types, user_type_strings) user_type_strings.append(user_type.as_cql_query(formatted=True)) + def _add_table_metadata(self, table_metadata): + self._clear_table_indexes(table_metadata.name) + + self.tables[table_metadata.name] = table_metadata + for index_name, index_metadata in table_metadata.indexes.iteritems(): + self.indexes[index_name] = index_metadata + + def _clear_table_indexes(self, table_name): + if table_name in self.tables: + table_meta = self.tables[table_name] + for index_name in table_meta.indexes: + self.indexes.pop(index_name, None) class UserType(object): """ From 86944d70868522803508343c52ebc45412e9dd07 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 21 Apr 2015 13:30:17 -0500 Subject: [PATCH 1357/3726] Updates to index metadata indexes. 
Make indexes follow table alter Make keyspace meta clear indexes on table drop Add tests for this feature PYTHON-241 --- cassandra/metadata.py | 15 ++-- tests/integration/standard/test_metadata.py | 97 ++++++++++++++++++++- 2 files changed, 103 insertions(+), 9 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 8a11a64596..dfc69e3138 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -158,6 +158,7 @@ def keyspace_changed(self, keyspace, ks_results): if old_keyspace_meta: keyspace_meta.tables = old_keyspace_meta.tables keyspace_meta.user_types = old_keyspace_meta.user_types + keyspace_meta.indexes = old_keyspace_meta.indexes if (keyspace_meta.replication_strategy != old_keyspace_meta.replication_strategy): self._keyspace_updated(keyspace) else: @@ -182,13 +183,11 @@ def table_changed(self, keyspace, table, cf_results, col_results, triggers_resul if not cf_results: # the table was removed - table_meta = keyspace_meta.tables.pop(table, None) - if table_meta: - keyspace_meta._clear_table_indexes(table_meta.name) + keyspace_meta._drop_table_metadata(table) else: assert len(cf_results) == 1 table_meta = self._build_table_metadata(keyspace_meta, cf_results[0], {table: col_results}, {table: triggers_result}) - keyspace_meta._add_table_metadata(ctable_meta) + keyspace_meta._add_table_metadata(table_meta) def _keyspace_added(self, ksname): if self.token_map: @@ -788,15 +787,15 @@ def resolve_user_types(self, key, types, user_type_strings): user_type_strings.append(user_type.as_cql_query(formatted=True)) def _add_table_metadata(self, table_metadata): - self._clear_table_indexes(table_metadata.name) + self._drop_table_metadata(table_metadata.name) self.tables[table_metadata.name] = table_metadata for index_name, index_metadata in table_metadata.indexes.iteritems(): self.indexes[index_name] = index_metadata - def _clear_table_indexes(self, table_name): - if table_name in self.tables: - table_meta = self.tables[table_name] + def 
_drop_table_metadata(self, table_name): + table_meta = self.tables.pop(table_name, None) + if table_meta: for index_name in table_meta.indexes: self.indexes.pop(index_name, None) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index e5b2eab3ce..a3f1db7174 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -25,7 +25,7 @@ from cassandra import AlreadyExists from cassandra.cluster import Cluster -from cassandra.metadata import (Metadata, KeyspaceMetadata, TableMetadata, +from cassandra.metadata import (Metadata, KeyspaceMetadata, TableMetadata, IndexMetadata, Token, MD5Token, TokenMap, murmur3) from cassandra.policies import SimpleConvictionPolicy from cassandra.pool import Host @@ -928,3 +928,98 @@ def test_keyspace_alter(self): new_keyspace_meta = self.cluster.metadata.keyspaces[name] self.assertNotEqual(original_keyspace_meta, new_keyspace_meta) self.assertEqual(new_keyspace_meta.durable_writes, False) + + +class IndexMapTests(unittest.TestCase): + + keyspace_name = 'index_map_tests' + + @property + def table_name(self): + return self._testMethodName.lower() + + @classmethod + def setup_class(cls): + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.session = cls.cluster.connect() + try: + if cls.keyspace_name in cls.cluster.metadata.keyspaces: + cls.session.execute("DROP KEYSPACE %s" % cls.keyspace_name) + + cls.session.execute( + """ + CREATE KEYSPACE %s + WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}; + """ % cls.keyspace_name) + cls.session.set_keyspace(cls.keyspace_name) + except Exception: + cls.cluster.shutdown() + raise + + @classmethod + def teardown_class(cls): + try: + cls.session.execute("DROP KEYSPACE %s" % cls.keyspace_name) + finally: + cls.cluster.shutdown() + + def create_basic_table(self): + self.session.execute("CREATE TABLE %s (k int PRIMARY KEY, a int)" % self.table_name) + + def 
drop_basic_table(self): + self.session.execute("DROP TABLE %s" % self.table_name) + + def test_index_updates(self): + self.create_basic_table() + + ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name] + table_meta = ks_meta.tables[self.table_name] + self.assertNotIn('a_idx', ks_meta.indexes) + self.assertNotIn('b_idx', ks_meta.indexes) + self.assertNotIn('a_idx', table_meta.indexes) + self.assertNotIn('b_idx', table_meta.indexes) + + self.session.execute("CREATE INDEX a_idx ON %s (a)" % self.table_name) + self.session.execute("ALTER TABLE %s ADD b int" % self.table_name) + self.session.execute("CREATE INDEX b_idx ON %s (b)" % self.table_name) + + ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name] + table_meta = ks_meta.tables[self.table_name] + self.assertIsInstance(ks_meta.indexes['a_idx'], IndexMetadata) + self.assertIsInstance(ks_meta.indexes['b_idx'], IndexMetadata) + self.assertIsInstance(table_meta.indexes['a_idx'], IndexMetadata) + self.assertIsInstance(table_meta.indexes['b_idx'], IndexMetadata) + + # both indexes updated when index dropped + self.session.execute("DROP INDEX a_idx") + ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name] + table_meta = ks_meta.tables[self.table_name] + self.assertNotIn('a_idx', ks_meta.indexes) + self.assertIsInstance(ks_meta.indexes['b_idx'], IndexMetadata) + self.assertNotIn('a_idx', table_meta.indexes) + self.assertIsInstance(table_meta.indexes['b_idx'], IndexMetadata) + + # keyspace index updated when table dropped + self.drop_basic_table() + ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name] + self.assertNotIn(self.table_name, ks_meta.tables) + self.assertNotIn('a_idx', ks_meta.indexes) + self.assertNotIn('b_idx', ks_meta.indexes) + + def test_index_follows_alter(self): + self.create_basic_table() + + idx = self.table_name + '_idx' + self.session.execute("CREATE INDEX %s ON %s (a)" % (idx, self.table_name)) + ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name] + 
table_meta = ks_meta.tables[self.table_name] + self.assertIsInstance(ks_meta.indexes[idx], IndexMetadata) + self.assertIsInstance(table_meta.indexes[idx], IndexMetadata) + self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name) + old_meta = ks_meta + ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name] + self.assertIsNot(ks_meta, old_meta) + table_meta = ks_meta.tables[self.table_name] + self.assertIsInstance(ks_meta.indexes[idx], IndexMetadata) + self.assertIsInstance(table_meta.indexes[idx], IndexMetadata) + self.drop_basic_table() From b94e31be846c8366b3ab58dc8cb838fa5448db10 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 21 Apr 2015 14:55:37 -0500 Subject: [PATCH 1358/3726] Add connect_timeout to cluster and Connection.factory PYTHON-206 --- cassandra/cluster.py | 16 +++++++++++++--- cassandra/connection.py | 7 +++---- tests/integration/standard/test_connection.py | 2 +- 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 9aa5912b64..7183df8903 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -427,6 +427,14 @@ def auth_provider(self, value): See :attr:`.schema_event_refresh_window` for discussion of rationale """ + connect_timeout = 5 + """ + Timeout, in seconds, for creating new connections. + + This timeout covers the entire connection negotiation, including TCP + establishment, options passing, and authentication. + """ + sessions = None control_connection = None scheduler = None @@ -465,7 +473,8 @@ def __init__(self, control_connection_timeout=2.0, idle_heartbeat_interval=30, schema_event_refresh_window=2, - topology_event_refresh_window=10): + topology_event_refresh_window=10, + connect_timeout=5): """ Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. 
@@ -518,6 +527,7 @@ def __init__(self, self.idle_heartbeat_interval = idle_heartbeat_interval self.schema_event_refresh_window = schema_event_refresh_window self.topology_event_refresh_window = topology_event_refresh_window + self.connect_timeout = connect_timeout self._listeners = set() self._listener_lock = Lock() @@ -707,11 +717,11 @@ def connection_factory(self, address, *args, **kwargs): Intended for internal use only. """ kwargs = self._make_connection_kwargs(address, kwargs) - return self.connection_class.factory(address, *args, **kwargs) + return self.connection_class.factory(address, self.connect_timeout, *args, **kwargs) def _make_connection_factory(self, host, *args, **kwargs): kwargs = self._make_connection_kwargs(host.address, kwargs) - return partial(self.connection_class.factory, host.address, *args, **kwargs) + return partial(self.connection_class.factory, host.address, self.connect_timeout, *args, **kwargs) def _make_connection_kwargs(self, address, kwargs_dict): if self._auth_provider_callable: diff --git a/cassandra/connection.py b/cassandra/connection.py index 6cbd434650..c239313c45 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -240,20 +240,19 @@ def handle_fork(self): pass @classmethod - def factory(cls, *args, **kwargs): + def factory(cls, host, timeout, *args, **kwargs): """ A factory function which returns connections which have succeeded in connecting and are ready for service (or raises an exception otherwise). 
""" - timeout = kwargs.pop('timeout', 5.0) - conn = cls(*args, **kwargs) + conn = cls(host, *args, **kwargs) conn.connected_event.wait(timeout) if conn.last_error: raise conn.last_error elif not conn.connected_event.is_set(): conn.close() - raise OperationTimedOut("Timed out creating connection") + raise OperationTimedOut("Timed out creating connection (%s seconds)" % timeout) else: return conn diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index e52280642b..3261adc346 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -58,7 +58,7 @@ def get_connection(self): e = None for i in range(5): try: - conn = self.klass.factory(protocol_version=PROTOCOL_VERSION) + conn = self.klass.factory(host='127.0.0.1', timeout=5, protocol_version=PROTOCOL_VERSION) break except (OperationTimedOut, NoHostAvailable) as e: continue From a84d4c0796da4550aa37c006cf1a66efcf62c428 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 22 Apr 2015 13:28:25 -0500 Subject: [PATCH 1359/3726] six.iteritems for newly added index dict --- cassandra/metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index dfc69e3138..a3f2bcc3b4 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -790,7 +790,7 @@ def _add_table_metadata(self, table_metadata): self._drop_table_metadata(table_metadata.name) self.tables[table_metadata.name] = table_metadata - for index_name, index_metadata in table_metadata.indexes.iteritems(): + for index_name, index_metadata in six.iteritems(table_metadata.indexes): self.indexes[index_name] = index_metadata def _drop_table_metadata(self, table_name): From d0ee3bc5470aea7fa67d5b80189e51cb68ec64b6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 22 Apr 2015 12:58:50 -0500 Subject: [PATCH 1360/3726] Make DCAware LBP tolerate DC changes during query plan Fixes a RuntimeError 
that would raise if the DCAwareRoundRobinPolicy DC:host map changed during generation. PYTHON-297 --- cassandra/policies.py | 10 +-- tests/unit/test_policies.py | 154 ++++++++++++++++++++++++++++++++++++ 2 files changed, 159 insertions(+), 5 deletions(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 0dc36af76d..244df2411d 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -267,11 +267,11 @@ def make_query_plan(self, working_keyspace=None, query=None): for host in islice(cycle(local_live), pos, pos + len(local_live)): yield host - for dc, current_dc_hosts in six.iteritems(self._dc_live_hosts): - if dc == self.local_dc: - continue - - for host in current_dc_hosts[:self.used_hosts_per_remote_dc]: + # the dict can change, so get candidate DCs iterating over keys of a copy + other_dcs = [dc for dc in self._dc_live_hosts.copy().keys() if dc != self.local_dc] + for dc in other_dcs: + remote_live = self._dc_live_hosts.get(dc, ()) + for host in remote_live[:self.used_hosts_per_remote_dc]: yield host def on_up(self, host): diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index c6f049e09d..3e24f71fac 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -292,6 +292,160 @@ def test_status_updates(self): qplan = list(policy.make_query_plan()) self.assertEqual(qplan, []) + def test_modification_during_generation(self): + hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)] + for h in hosts[:2]: + h.set_location_info("dc1", "rack1") + for h in hosts[2:]: + h.set_location_info("dc2", "rack1") + + policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=3) + policy.populate(Mock(), hosts) + + # The general concept here is to change thee internal state of the + # policy during plan generation. In this case we use a grey-box + # approach that changes specific things during known phases of the + # generator. 
+ + new_host = Host(4, SimpleConvictionPolicy) + new_host.set_location_info("dc1", "rack1") + + # new local before iteration + plan = policy.make_query_plan() + policy.on_up(new_host) + # local list is not bound yet, so we get to see that one + self.assertEqual(len(list(plan)), 3 + 2) + + # remove local before iteration + plan = policy.make_query_plan() + policy.on_down(new_host) + # local list is not bound yet, so we don't see it + self.assertEqual(len(list(plan)), 2 + 2) + + # new local after starting iteration + plan = policy.make_query_plan() + next(plan) + policy.on_up(new_host) + # local list was is bound, and one consumed, so we only see the other original + self.assertEqual(len(list(plan)), 1 + 2) + + # remove local after traversing available + plan = policy.make_query_plan() + for _ in range(3): + next(plan) + policy.on_down(new_host) + # we should be past the local list + self.assertEqual(len(list(plan)), 0 + 2) + + # REMOTES CHANGE + new_host.set_location_info("dc2", "rack1") + + # new remote after traversing local, but not starting remote + plan = policy.make_query_plan() + for _ in range(2): + next(plan) + policy.on_up(new_host) + # list is updated before we get to it + self.assertEqual(len(list(plan)), 0 + 3) + + # remove remote after traversing local, but not starting remote + plan = policy.make_query_plan() + for _ in range(2): + next(plan) + policy.on_down(new_host) + # list is updated before we get to it + self.assertEqual(len(list(plan)), 0 + 2) + + # new remote after traversing local, and starting remote + plan = policy.make_query_plan() + for _ in range(3): + next(plan) + policy.on_up(new_host) + # slice is already made, and we've consumed one + self.assertEqual(len(list(plan)), 0 + 1) + + # remove remote after traversing local, and starting remote + plan = policy.make_query_plan() + for _ in range(3): + next(plan) + policy.on_down(new_host) + # slice is created with all present, and we've consumed one + self.assertEqual(len(list(plan)), 0 + 2) 
+ + # local DC disappears after finishing it, but not starting remote + plan = policy.make_query_plan() + for _ in range(2): + next(plan) + policy.on_down(hosts[0]) + policy.on_down(hosts[1]) + # dict traversal starts as normal + self.assertEqual(len(list(plan)), 0 + 2) + policy.on_up(hosts[0]) + policy.on_up(hosts[1]) + + # PYTHON-297 addresses the following cases, where DCs come and go + # during generation + # local DC disappears after finishing it, and starting remote + plan = policy.make_query_plan() + for _ in range(3): + next(plan) + policy.on_down(hosts[0]) + policy.on_down(hosts[1]) + # dict traversal has begun and consumed one + self.assertEqual(len(list(plan)), 0 + 1) + policy.on_up(hosts[0]) + policy.on_up(hosts[1]) + + # remote DC disappears after finishing local, but not starting remote + plan = policy.make_query_plan() + for _ in range(2): + next(plan) + policy.on_down(hosts[2]) + policy.on_down(hosts[3]) + # nothing left + self.assertEqual(len(list(plan)), 0 + 0) + policy.on_up(hosts[2]) + policy.on_up(hosts[3]) + + # remote DC disappears while traversing it + plan = policy.make_query_plan() + for _ in range(3): + next(plan) + policy.on_down(hosts[2]) + policy.on_down(hosts[3]) + # we continue with remainder of original list + self.assertEqual(len(list(plan)), 0 + 1) + policy.on_up(hosts[2]) + policy.on_up(hosts[3]) + + + another_host = Host(5, SimpleConvictionPolicy) + another_host.set_location_info("dc3", "rack1") + new_host.set_location_info("dc3", "rack1") + + # new DC while traversing remote + plan = policy.make_query_plan() + for _ in range(3): + next(plan) + policy.on_up(new_host) + policy.on_up(another_host) + # we continue with remainder of original list + self.assertEqual(len(list(plan)), 0 + 1) + + # remote DC disappears after finishing it + plan = policy.make_query_plan() + for _ in range(3): + next(plan) + last_host_in_this_dc = next(plan) + if last_host_in_this_dc in (new_host, another_host): + down_hosts = [new_host, another_host] + 
else: + down_hosts = hosts[2:] + for h in down_hosts: + policy.on_down(h) + # the last DC has two + self.assertEqual(len(list(plan)), 0 + 2) + def test_no_live_nodes(self): """ Ensure query plan for a downed cluster will execute without errors From 8fe0fa5f7fadd9dad2a545dde6da79e118882639 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 23 Apr 2015 10:03:18 -0500 Subject: [PATCH 1361/3726] Remove race when adding new host during node/token rebuild Fixes an issue where a race could cause None return from Cluster.add_host during node/token list rebuild. This would cause None to be inserted into the Token map, and subsequently blow up TokenAware LBP creating a query plan. PYTHON-298 --- cassandra/cluster.py | 28 +++++++++++---------- cassandra/metadata.py | 28 +++++++++------------ tests/integration/standard/test_metadata.py | 1 - 3 files changed, 27 insertions(+), 30 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index e523e38162..c7b2be7b73 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -64,14 +64,13 @@ from cassandra.policies import (RoundRobinPolicy, SimpleConvictionPolicy, ExponentialReconnectionPolicy, HostDistance, RetryPolicy) -from cassandra.pool import (_ReconnectionHandler, _HostReconnectionHandler, +from cassandra.pool import (Host, _ReconnectionHandler, _HostReconnectionHandler, HostConnectionPool, HostConnection, NoConnectionsAvailable) from cassandra.query import (SimpleStatement, PreparedStatement, BoundStatement, BatchStatement, bind_params, QueryTrace, Statement, named_tuple_factory, dict_factory, FETCH_SIZE_UNSET) - def _is_eventlet_monkey_patched(): if 'eventlet.patcher' not in sys.modules: return False @@ -525,7 +524,7 @@ def __init__(self, # let Session objects be GC'ed (and shutdown) when the user no longer # holds a reference. 
self.sessions = WeakSet() - self.metadata = Metadata(self) + self.metadata = Metadata() self.control_connection = None self._prepared_statements = WeakValueDictionary() self._prepared_statement_lock = Lock() @@ -743,8 +742,8 @@ def connect(self, keyspace=None): self.connection_class.initialize_reactor() atexit.register(partial(_shutdown_cluster, self)) for address in self.contact_points: - host = self.add_host(address, signal=False) - if host: + host, new = self.add_host(address, signal=False) + if new: host.set_up() for listener in self.listeners: listener.on_add(host) @@ -1069,15 +1068,17 @@ def signal_connection_failure(self, host, connection_exc, is_host_addition, expe def add_host(self, address, datacenter=None, rack=None, signal=True, refresh_nodes=True): """ Called when adding initial contact points and when the control - connection subsequently discovers a new node. Intended for internal - use only. + connection subsequently discovers a new node. + Returns a Host instance, and a flag indicating whether it was new in + the metadata. + Intended for internal use only. 
""" - new_host = self.metadata.add_host(address, datacenter, rack) - if new_host and signal: - log.info("New Cassandra host %r discovered", new_host) - self.on_add(new_host, refresh_nodes) + host, new = self.metadata.add_or_return_host(Host(address, self.conviction_policy_factory, datacenter, rack)) + if new and signal: + log.info("New Cassandra host %r discovered", host) + self.on_add(host, refresh_nodes) - return new_host + return host, new def remove_host(self, host): """ @@ -2158,6 +2159,7 @@ def refresh_node_list_and_token_map(self, force_token_rebuild=False): def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, force_token_rebuild=False): + if preloaded_results: log.debug("[control connection] Refreshing node list and token map using preloaded results") peers_result = preloaded_results[0] @@ -2215,7 +2217,7 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, rack = row.get("rack") if host is None: log.debug("[control connection] Found new host to connect to: %s", addr) - host = self._cluster.add_host(addr, datacenter, rack, signal=True, refresh_nodes=False) + host, _ = self._cluster.add_host(addr, datacenter, rack, signal=True, refresh_nodes=False) should_rebuild_token_map = True else: should_rebuild_token_map |= self._update_location_info(host, datacenter, rack) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 1f88d8f9e6..8ca84fc846 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -20,7 +20,6 @@ import logging import re from threading import RLock -import weakref import six murmur3 = None @@ -31,7 +30,6 @@ import cassandra.cqltypes as types from cassandra.marshal import varint_unpack -from cassandra.pool import Host from cassandra.util import OrderedDict log = logging.getLogger(__name__) @@ -72,11 +70,7 @@ class Metadata(object): token_map = None """ A :class:`~.TokenMap` instance describing the ring topology. 
""" - def __init__(self, cluster): - # use a weak reference so that the Cluster object can be GC'ed. - # Normally the cycle detector would handle this, but implementing - # __del__ disables that. - self.cluster_ref = weakref.ref(cluster) + def __init__(self): self.keyspaces = {} self._hosts = {} self._hosts_lock = RLock() @@ -451,17 +445,19 @@ def can_support_partitioner(self): else: return True - def add_host(self, address, datacenter, rack): - cluster = self.cluster_ref() + def add_or_return_host(self, host): + """ + Returns a tuple (host, new), where ``host`` is a Host + instance, and ``new`` is a bool indicating whether + the host was newly added. + """ with self._hosts_lock: - if address not in self._hosts: - new_host = Host( - address, cluster.conviction_policy_factory, datacenter, rack) - self._hosts[address] = new_host - else: - return None + try: + return self._hosts[host.address], False + except KeyError: + self._hosts[host.address] = host + return host, True - return new_host def remove_host(self, host): with self._hosts_lock: diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index e5b2eab3ce..eab46d0dce 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -143,7 +143,6 @@ def test_basic_table_meta_properties(self): self.cluster.control_connection.refresh_schema() meta = self.cluster.metadata - self.assertNotEqual(meta.cluster_ref, None) self.assertNotEqual(meta.cluster_name, None) self.assertTrue(self.ksname in meta.keyspaces) ksmeta = meta.keyspaces[self.ksname] From 25f10a574c1cbb789eaeb19c16b30dfd92bb7d90 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 23 Apr 2015 10:47:34 -0500 Subject: [PATCH 1362/3726] Update unit test for new Metadata init signature. 
--- tests/unit/test_metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 0c59598a23..5ce78f2e25 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -316,7 +316,7 @@ def test_build_index_as_cql(self): column_meta.name = 'column_name_here' column_meta.table.name = 'table_name_here' column_meta.table.keyspace.name = 'keyspace_name_here' - meta_model = Metadata(Mock()) + meta_model = Metadata() row = {'index_name': 'index_name_here', 'index_type': 'index_type_here'} index_meta = meta_model._build_index_metadata(column_meta, row) From c2b999c5ee1f6931f581d0e77517287f4ed7935d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 23 Apr 2015 12:47:23 -0500 Subject: [PATCH 1363/3726] Ensure serial consistency gets set on statements and messages PYTHON-299 --- cassandra/query.py | 1 + tests/integration/standard/test_query.py | 44 ++++++++++++++++++++---- 2 files changed, 39 insertions(+), 6 deletions(-) diff --git a/cassandra/query.py b/cassandra/query.py index e973ed2b2e..8bc156f1bb 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -248,6 +248,7 @@ def _set_serial_consistency_level(self, serial_consistency_level): raise ValueError( "serial_consistency_level must be either ConsistencyLevel.SERIAL " "or ConsistencyLevel.LOCAL_SERIAL") + self._serial_consistency_level = serial_consistency_level def _del_serial_consistency_level(self): self._serial_consistency_level = None diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 780f307891..f207c45367 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -325,14 +325,22 @@ def test_conditional_update(self): statement = SimpleStatement( "UPDATE test3rf.test SET v=1 WHERE k=0 IF v=1", serial_consistency_level=ConsistencyLevel.SERIAL) - result = self.session.execute(statement) + # crazy test, but PYTHON-299 + # 
TODO: expand to check more parameters get passed to statement, and on to messages + self.assertEqual(statement.serial_consistency_level, ConsistencyLevel.SERIAL) + future = self.session.execute_async(statement) + result = future.result() + self.assertEqual(future.message.serial_consistency_level, ConsistencyLevel.SERIAL) self.assertEqual(1, len(result)) self.assertFalse(result[0].applied) statement = SimpleStatement( "UPDATE test3rf.test SET v=1 WHERE k=0 IF v=0", - serial_consistency_level=ConsistencyLevel.SERIAL) - result = self.session.execute(statement) + serial_consistency_level=ConsistencyLevel.LOCAL_SERIAL) + self.assertEqual(statement.serial_consistency_level, ConsistencyLevel.LOCAL_SERIAL) + future = self.session.execute_async(statement) + result = future.result() + self.assertEqual(future.message.serial_consistency_level, ConsistencyLevel.LOCAL_SERIAL) self.assertEqual(1, len(result)) self.assertTrue(result[0].applied) @@ -342,15 +350,39 @@ def test_conditional_update_with_prepared_statements(self): "UPDATE test3rf.test SET v=1 WHERE k=0 IF v=2") statement.serial_consistency_level = ConsistencyLevel.SERIAL - result = self.session.execute(statement) + future = self.session.execute_async(statement) + result = future.result() + self.assertEqual(future.message.serial_consistency_level, ConsistencyLevel.SERIAL) self.assertEqual(1, len(result)) self.assertFalse(result[0].applied) statement = self.session.prepare( "UPDATE test3rf.test SET v=1 WHERE k=0 IF v=0") bound = statement.bind(()) - bound.serial_consistency_level = ConsistencyLevel.SERIAL - result = self.session.execute(statement) + bound.serial_consistency_level = ConsistencyLevel.LOCAL_SERIAL + future = self.session.execute_async(bound) + result = future.result() + self.assertEqual(future.message.serial_consistency_level, ConsistencyLevel.LOCAL_SERIAL) + self.assertEqual(1, len(result)) + self.assertTrue(result[0].applied) + + def test_conditional_update_with_batch_statements(self): + 
self.session.execute("INSERT INTO test3rf.test (k, v) VALUES (0, 0)") + statement = BatchStatement(serial_consistency_level=ConsistencyLevel.SERIAL) + statement.add("UPDATE test3rf.test SET v=1 WHERE k=0 IF v=1") + self.assertEqual(statement.serial_consistency_level, ConsistencyLevel.SERIAL) + future = self.session.execute_async(statement) + result = future.result() + self.assertEqual(future.message.serial_consistency_level, ConsistencyLevel.SERIAL) + self.assertEqual(1, len(result)) + self.assertFalse(result[0].applied) + + statement = BatchStatement(serial_consistency_level=ConsistencyLevel.LOCAL_SERIAL) + statement.add("UPDATE test3rf.test SET v=1 WHERE k=0 IF v=0") + self.assertEqual(statement.serial_consistency_level, ConsistencyLevel.LOCAL_SERIAL) + future = self.session.execute_async(statement) + result = future.result() + self.assertEqual(future.message.serial_consistency_level, ConsistencyLevel.LOCAL_SERIAL) self.assertEqual(1, len(result)) self.assertTrue(result[0].applied) From 28c870daf60ecacd96c4855d8d43a4ede0a14832 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 23 Apr 2015 13:54:00 -0500 Subject: [PATCH 1364/3726] Release ver and changelog update for 2.5.1 Conflicts: cassandra/__init__.py --- CHANGELOG.rst | 10 ++++++++++ cassandra/__init__.py | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index af2d611309..6000f16012 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,13 @@ +2.5.1 +===== +April 23, 2015 + +Bug Fixes +--------- +* Fix thread safety in DC-aware load balancing policy (PYTHON-297) +* Fix race condition in node/token rebuild (PYTHON-298) +* Set and send serial consistency parameter (PYTHON-299) + 2.5.0 ===== March 30, 2015 diff --git a/cassandra/__init__.py b/cassandra/__init__.py index fa26054fc2..2be5700099 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) 
-__version_info__ = (2, 5, 0) +__version_info__ = (2, 5, 1) __version__ = '.'.join(map(str, __version_info__)) From 931337b4a253d89a0ac309b305f38f6c8af0ce10 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Thu, 23 Apr 2015 18:06:39 -0700 Subject: [PATCH 1365/3726] Update date/time datatype tests to run on C* 3.0 As per CASSANDRA-7523, the new date and time types have been pushed back to C* 3.0 / protocol v4. --- tests/integration/datatype_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/datatype_utils.py b/tests/integration/datatype_utils.py index bd76c36fec..fb437d7539 100644 --- a/tests/integration/datatype_utils.py +++ b/tests/integration/datatype_utils.py @@ -57,7 +57,7 @@ def update_datatypes(): if _cass_version >= (2, 1, 0): COLLECTION_TYPES.append('tuple') - if _cass_version >= (2, 1, 5): + if _cass_version >= (3, 0, 0): PRIMITIVE_DATATYPES.append('date') PRIMITIVE_DATATYPES.append('time') From 9f0ca79a700ea06881ae67bd6a401adb31cef4df Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 24 Apr 2015 10:24:37 -0500 Subject: [PATCH 1366/3726] Add docs for ResponseFuture.custom_paylaod --- cassandra/cluster.py | 11 +++++++++++ docs/api/cassandra/cluster.rst | 2 ++ 2 files changed, 13 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 28885ae9aa..7d982e15d9 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2677,6 +2677,17 @@ def has_more_pages(self): @property def custom_payload(self): + """ + The custom payload returned from the server, if any. This will only be + set by Cassandra servers implementing a custom QueryHandler, and only + for protocol_version 4+. + + Ensure the future is complete before trying to access this property + (call :meth:`.result()`, or after callback is invoked). + Otherwise it may throw if the response has not been received. + + :return: :ref:`custom_payload`. 
+ """ if not self._event.is_set(): raise Exception("custom_payload cannot be retrieved before ResponseFuture is finalized") return self._custom_payload diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 6532c9a026..e6e3cf7660 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -102,6 +102,8 @@ .. automethod:: get_query_trace() + .. autoattribute:: custom_payload() + .. autoattribute:: has_more_pages .. automethod:: start_fetching_next_page() From 3f2a51ac0d12304a38031543c2c6afff31e9d5f1 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Fri, 24 Apr 2015 13:41:50 -0700 Subject: [PATCH 1367/3726] handle Timeout errors in LargeDataTests --- tests/integration/long/test_large_data.py | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/tests/integration/long/test_large_data.py b/tests/integration/long/test_large_data.py index 7edd28b48c..acb203568b 100644 --- a/tests/integration/long/test_large_data.py +++ b/tests/integration/long/test_large_data.py @@ -18,15 +18,22 @@ from queue import Queue, Empty # noqa from struct import pack -import unittest +import logging, sys, traceback, time -from cassandra import ConsistencyLevel +from cassandra import ConsistencyLevel, OperationTimedOut, WriteTimeout from cassandra.cluster import Cluster from cassandra.query import dict_factory from cassandra.query import SimpleStatement from tests.integration import use_singledc, PROTOCOL_VERSION from tests.integration.long.utils import create_schema +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +log = logging.getLogger(__name__) + def setup_module(): use_singledc() @@ -71,6 +78,11 @@ def batch_futures(self, session, statement_generator): while True: try: futures.get_nowait().result() + except (OperationTimedOut, WriteTimeout): + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, 
traceback.extract_tb(tb))) + del tb + time.sleep(1) except Empty: break @@ -80,6 +92,11 @@ def batch_futures(self, session, statement_generator): while True: try: futures.get_nowait().result() + except (OperationTimedOut, WriteTimeout): + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + time.sleep(1) except Empty: break From e67e215df51f74d2032090253daaf0c1158f1e9b Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Fri, 24 Apr 2015 14:50:03 -0700 Subject: [PATCH 1368/3726] Refactored SchemaTests --- tests/integration/long/test_schema.py | 125 +++++++++++++------------- 1 file changed, 61 insertions(+), 64 deletions(-) diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index af51924045..6f4cc0f1f4 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -12,10 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import logging +import logging, sys, traceback from cassandra import ConsistencyLevel, OperationTimedOut from cassandra.cluster import Cluster +from cassandra.protocol import ConfigurationException from cassandra.query import SimpleStatement from tests.integration import use_singledc, PROTOCOL_VERSION @@ -43,91 +44,87 @@ def teardown_class(cls): cls.cluster.shutdown() def test_recreates(self): + """ + Basic test for repeated schema creation and use, using many different keyspaces + """ + session = self.session - replication_factor = 3 for i in range(2): - for keyspace in range(5): - keyspace = 'ks_%s' % keyspace - results = session.execute('SELECT keyspace_name FROM system.schema_keyspaces') + for keyspace_number in range(5): + keyspace = "ks_{0}".format(keyspace_number) + + results = session.execute("SELECT keyspace_name FROM system.schema_keyspaces") existing_keyspaces = [row[0] for row in results] if keyspace in existing_keyspaces: - ddl = 'DROP KEYSPACE %s' % keyspace - log.debug(ddl) - session.execute(ddl) - - ddl = """ - CREATE KEYSPACE %s - WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '%s'} - """ % (keyspace, str(replication_factor)) - log.debug(ddl) - session.execute(ddl) - - ddl = 'CREATE TABLE %s.cf (k int PRIMARY KEY, i int)' % keyspace - log.debug(ddl) - session.execute(ddl) - - statement = 'USE %s' % keyspace - log.debug(ddl) - session.execute(statement) - - statement = 'INSERT INTO %s(k, i) VALUES (0, 0)' % 'cf' - log.debug(statement) - ss = SimpleStatement(statement, - consistency_level=ConsistencyLevel.QUORUM) + drop = "DROP KEYSPACE {0}".format(keyspace) + log.debug(drop) + session.execute(drop) + + create = "CREATE KEYSPACE {0} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 3}}".format(keyspace) + log.debug(create) + session.execute(create) + + create = "CREATE TABLE {0}.cf (k int PRIMARY KEY, i int)".format(keyspace) + log.debug(create) + session.execute(create) + + use = "USE {0}".format(keyspace) 
+ log.debug(use) + session.execute(use) + + insert = "INSERT INTO cf (k, i) VALUES (0, 0)" + log.debug(insert) + ss = SimpleStatement(insert, consistency_level=ConsistencyLevel.QUORUM) session.execute(ss) def test_for_schema_disagreements_different_keyspaces(self): + """ + Tests for any schema disagreements using many different keyspaces + """ + session = self.session for i in xrange(30): try: - session.execute(''' - CREATE KEYSPACE test_%s - WITH replication = {'class': 'SimpleStrategy', - 'replication_factor': 1} - ''' % i) - - session.execute(''' - CREATE TABLE test_%s.cf ( - key int, - value int, - PRIMARY KEY (key)) - ''' % i) + session.execute("CREATE KEYSPACE test_{0} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 1}}".format(i)) + session.execute("CREATE TABLE test_{0}.cf (key int PRIMARY KEY, value int)".format(i)) for j in xrange(100): - session.execute('INSERT INTO test_%s.cf (key, value) VALUES (%s, %s)' % (i, j, j)) - - session.execute(''' - DROP KEYSPACE test_%s - ''' % i) + session.execute("INSERT INTO test_{0}.cf (key, value) VALUES ({1}, {1})".format(i, j)) except OperationTimedOut: - pass + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + finally: + try: + session.execute("DROP KEYSPACE test_{0}".format(i)) + except ConfigurationException: + # We're good, the keyspace was never created due to OperationTimedOut + pass def test_for_schema_disagreements_same_keyspace(self): + """ + Tests for any schema disagreements using the same keyspace multiple times + """ + cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() for i in xrange(30): try: - session.execute(''' - CREATE KEYSPACE test - WITH replication = {'class': 'SimpleStrategy', - 'replication_factor': 1} - ''') - - session.execute(''' - CREATE TABLE test.cf ( - key int, - value int, - PRIMARY KEY (key)) - ''') + session.execute("CREATE KEYSPACE test WITH 
replication = {'class': 'SimpleStrategy', 'replication_factor': 1}") + session.execute("CREATE TABLE test.cf (key int PRIMARY KEY, value int)") for j in xrange(100): - session.execute('INSERT INTO test.cf (key, value) VALUES (%s, %s)' % (j, j)) - - session.execute(''' - DROP KEYSPACE test - ''') + session.execute("INSERT INTO test.cf (key, value) VALUES ({0}, {0})".format(j)) except OperationTimedOut: - pass + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + finally: + try: + session.execute("DROP KEYSPACE test") + except ConfigurationException: + # We're good, the keyspace was never created due to OperationTimedOut + pass From 2d8ee3cae0486da39749cb6c93b83150db5e42cc Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Fri, 24 Apr 2015 15:11:54 -0700 Subject: [PATCH 1369/3726] Handle ReadTimeout in LightweightTransactionTests --- tests/integration/standard/test_query.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index f207c45367..b30c387b01 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -442,15 +442,16 @@ def test_no_connection_refused_on_timeout(self): for (success, result) in results: if success: continue - # In this case result is an exception - if type(result).__name__ == "NoHostAvailable": - self.fail("PYTHON-91: Disconnected from Cassandra: %s" % result.message) - break - if type(result).__name__ == "WriteTimeout": - received_timeout = True - continue - self.fail("Unexpected exception %s: %s" % (type(result).__name__, result.message)) - break + else: + # In this case result is an exception + if type(result).__name__ == "NoHostAvailable": + self.fail("PYTHON-91: Disconnected from Cassandra: %s" % result.message) + if type(result).__name__ == "WriteTimeout": + received_timeout = True + 
continue + if type(result).__name__ == "ReadTimeout": + continue + self.fail("Unexpected exception %s: %s" % (type(result).__name__, result.message)) # Make sure test passed self.assertTrue(received_timeout) From 6423e938a45f1261e8fc8b7431c0efdce188cbe9 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Fri, 24 Apr 2015 19:16:46 -0700 Subject: [PATCH 1370/3726] handle OperationTimedOut in SchemaTests --- tests/integration/long/test_schema.py | 32 ++++++++++++++++++--------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index 6f4cc0f1f4..806726cf32 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -97,11 +97,17 @@ def test_for_schema_disagreements_different_keyspaces(self): log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb finally: - try: - session.execute("DROP KEYSPACE test_{0}".format(i)) - except ConfigurationException: - # We're good, the keyspace was never created due to OperationTimedOut - pass + while True: + try: + session.execute("DROP KEYSPACE test_{0}".format(i)) + break + except OperationTimedOut: + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + except ConfigurationException: + # We're good, the keyspace was never created due to OperationTimedOut + break def test_for_schema_disagreements_same_keyspace(self): """ @@ -123,8 +129,14 @@ def test_for_schema_disagreements_same_keyspace(self): log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb finally: - try: - session.execute("DROP KEYSPACE test") - except ConfigurationException: - # We're good, the keyspace was never created due to OperationTimedOut - pass + while True: + try: + session.execute("DROP KEYSPACE test") + break + except OperationTimedOut: + ex_type, ex, tb = 
sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + except ConfigurationException: + # We're good, the keyspace was never created due to OperationTimedOut + break From 1cffc9168e336e7d27d402b8abd34dca28fbd377 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Fri, 24 Apr 2015 20:10:19 -0700 Subject: [PATCH 1371/3726] Handle OperationTimedOut in SchemaMetadataTests in TearDown --- tests/integration/standard/test_metadata.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 9b5e73c6f9..6e2bfc402c 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -19,10 +19,9 @@ import difflib from mock import Mock -import six -import sys +import six, logging, sys, traceback -from cassandra import AlreadyExists +from cassandra import AlreadyExists, OperationTimedOut from cassandra.cluster import Cluster from cassandra.metadata import (Metadata, KeyspaceMetadata, TableMetadata, IndexMetadata, @@ -33,6 +32,8 @@ from tests.integration import (get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions) +log = logging.getLogger(__name__) + def setup_module(): use_singledc() @@ -54,8 +55,15 @@ def setUp(self): self.session.execute("CREATE KEYSPACE schemametadatatest WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}") def tearDown(self): - self.session.execute("DROP KEYSPACE schemametadatatest") - self.cluster.shutdown() + while True: + try: + self.session.execute("DROP KEYSPACE schemametadatatest") + self.cluster.shutdown() + break + except OperationTimedOut: + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb def make_create_statement(self, partition_cols, clustering_cols=None, other_cols=None, compact=False): 
clustering_cols = clustering_cols or [] From 7c03eacf70b3b203fe4d819a3e62e8b7a82bbe5e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 27 Apr 2015 09:37:31 -0500 Subject: [PATCH 1372/3726] Fix cqlengine UDT example in docs --- docs/cqlengine/models.rst | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/docs/cqlengine/models.rst b/docs/cqlengine/models.rst index 002ce3de65..dffd06fb3f 100644 --- a/docs/cqlengine/models.rst +++ b/docs/cqlengine/models.rst @@ -199,12 +199,10 @@ are only created, presisted, and queried via table Models. A short example to in name = Text(primary_key=True) addr = UserDefinedType(address) - sync_table(users) - - users.create(name="Joe", addr=address(street="Easy St.", zip=99999)) + users.create(name="Joe", addr=address(street="Easy St.", zipcode=99999)) user = users.objects(name="Joe")[0] print user.name, user.addr - # Joe {'street': Easy St., 'zipcode': None} + # Joe address(street=u'Easy St.', zipcode=99999) UDTs are modeled by inheriting :class:`~.usertype.UserType`, and setting column type attributes. Types are then used in defining models by declaring a column of type :class:`~.columns.UserDefinedType`, with the ``UserType`` class as a parameter. 
From d3d60206d49fc0ffde71daa1b75b8e1f07d1ade9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 30 Apr 2015 10:42:36 -0500 Subject: [PATCH 1373/3726] Basic timer management, and timer impl for Asyncore --- cassandra/connection.py | 63 +++++++++++++++++++++++++++++++++ cassandra/io/asyncorereactor.py | 49 ++++++++++++------------- 2 files changed, 85 insertions(+), 27 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index c239313c45..f5ca774a0d 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -29,6 +29,7 @@ from six.moves.queue import Queue, Empty # noqa import six +from six.moves import queue from six.moves import range from cassandra import ConsistencyLevel, AuthenticationFailed, OperationTimedOut @@ -256,6 +257,10 @@ def factory(cls, host, timeout, *args, **kwargs): else: return conn + @classmethod + def create_timer(cls, timeout, callback): + raise NotImplementedError() + def close(self): raise NotImplementedError() @@ -871,3 +876,61 @@ def stop(self): def _raise_if_stopped(self): if self._shutdown_event.is_set(): raise self.ShutdownException() + + +class Timer(object): + + cancelled = False + + def __init__(self, timeout, callback): + self.end = time.time() + timeout + self.callback = callback + if timeout < 0: + self.on_timeout() + + def __lt__(self, other): + return self.end < other.end + + def cancel(self): + self.callback = self._noop + self.cancelled = True + + def on_timeout(self): + self.callback() + + def _noop(self): + pass + + +class TimerManager(object): + + def __init__(self): + self._timers = queue.PriorityQueue() + + def add_timer(self, timer): + self._timers.put_nowait(timer) + + def service_timeouts(self): + now = time.time() + while not self._timers.empty(): + timer = self._timers.get_nowait() + if timer.end < now: + timer.on_timeout() + else: + self._timers.put_nowait(timer) + break + + @property + def next_timeout(self): + try: + return self._timers.queue[0].end + except IndexError: + pass 
+ + @property + def next_offset(self): + try: + next_end = self._timers.queue[0].end + return next_end - time.time() + except IndexError: + pass diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index ef687c388c..eeb319bb65 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -19,11 +19,12 @@ import socket import sys from threading import Event, Lock, Thread +import time import weakref from six.moves import range -from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL, EISCONN, errorcode +from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL, EISCONN try: from weakref import WeakSet except ImportError: @@ -36,9 +37,9 @@ except ImportError: ssl = None # NOQA -from cassandra import OperationTimedOut from cassandra.connection import (Connection, ConnectionShutdown, - ConnectionException, NONBLOCKING) + ConnectionException, NONBLOCKING, + Timer, TimerManager) from cassandra.protocol import RegisterMessage log = logging.getLogger(__name__) @@ -55,15 +56,17 @@ def _cleanup(loop_weakref): class AsyncoreLoop(object): + def __init__(self): self._pid = os.getpid() self._loop_lock = Lock() self._started = False self._shutdown = False - self._conns_lock = Lock() - self._conns = WeakSet() self._thread = None + + self._timers = TimerManager() + atexit.register(partial(_cleanup, weakref.ref(self))) def maybe_start(self): @@ -86,24 +89,22 @@ def maybe_start(self): def _run_loop(self): log.debug("Starting asyncore event loop") with self._loop_lock: - while True: + while not self._shutdown: try: - asyncore.loop(timeout=0.001, use_poll=True, count=1000) + asyncore.loop(timeout=0.001, use_poll=True, count=100) + self._timers.service_timeouts() + if not asyncore.socket_map: + time.sleep(0.005) except Exception: log.debug("Asyncore event loop stopped unexepectedly", exc_info=True) break - - if self._shutdown: - break - - with self._conns_lock: - if len(self._conns) == 0: - break - self._started = False 
log.debug("Asyncore event loop ended") + def add_timer(self, timer): + self._timers.add_timer(timer) + def _cleanup(self): self._shutdown = True if not self._thread: @@ -118,14 +119,6 @@ def _cleanup(self): log.debug("Event loop thread was joined") - def connection_created(self, connection): - with self._conns_lock: - self._conns.add(connection) - - def connection_destroyed(self, connection): - with self._conns_lock: - self._conns.discard(connection) - class AsyncoreConnection(Connection, asyncore.dispatcher): """ @@ -156,6 +149,12 @@ def handle_fork(cls): cls._loop._cleanup() cls._loop = None + @classmethod + def create_timer(self, timeout, callback): + timer = Timer(timeout, callback) + self._loop.add_timer(timer) + return timer + def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) asyncore.dispatcher.__init__(self) @@ -166,8 +165,6 @@ def __init__(self, *args, **kwargs): self.deque = deque() self.deque_lock = Lock() - self._loop.connection_created(self) - sockerr = None addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM) for (af, socktype, proto, canonname, sockaddr) in addresses: @@ -240,8 +237,6 @@ def close(self): asyncore.dispatcher.close(self) log.debug("Closed socket to %s", self.host) - self._loop.connection_destroyed(self) - if not self.is_defunct: self.error_all_callbacks( ConnectionShutdown("Connection to %s was closed" % self.host)) From 4a9e08ed7a12c0bd59c7aa96292c8c69a1a163ac Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 30 Apr 2015 10:43:55 -0500 Subject: [PATCH 1374/3726] Timer for libevreactor --- cassandra/io/libevreactor.py | 44 +++++++++--- cassandra/io/libevwrapper.c | 131 +++++++++++++++++++++++++++++++++++ 2 files changed, 166 insertions(+), 9 deletions(-) diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 93b4c97854..174cfd859c 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -20,10 +20,10 @@ from 
threading import Event, Lock, Thread import weakref -from six.moves import xrange +from six.moves import range -from cassandra import OperationTimedOut -from cassandra.connection import Connection, ConnectionShutdown, NONBLOCKING +from cassandra.connection import (Connection, ConnectionShutdown, + NONBLOCKING, Timer, TimerManager) from cassandra.protocol import RegisterMessage try: import cassandra.io.libevwrapper as libev @@ -40,7 +40,7 @@ try: import ssl except ImportError: - ssl = None # NOQA + ssl = None # NOQA log = logging.getLogger(__name__) @@ -50,7 +50,6 @@ def _cleanup(loop_weakref): loop = loop_weakref() except ReferenceError: return - loop._cleanup() @@ -85,10 +84,10 @@ def __init__(self): self._loop.unref() self._preparer.start() - atexit.register(partial(_cleanup, weakref.ref(self))) + self._timers = TimerManager() + self._loop_timer = libev.Timer(self._loop, self._on_loop_timer) - def notify(self): - self._notifier.send() + atexit.register(partial(_cleanup, weakref.ref(self))) def maybe_start(self): should_start = False @@ -133,6 +132,7 @@ def _cleanup(self): conn._read_watcher.stop() del conn._read_watcher + self.notify() # wake the timer watcher log.debug("Waiting for event loop thread to join...") self._thread.join(timeout=1.0) if self._thread.is_alive(): @@ -143,6 +143,23 @@ def _cleanup(self): log.debug("Event loop thread was joined") self._loop = None + def add_timer(self, timer): + self._timers.add_timer(timer) + self._notifier.send() # wake up in case this timer is earlier + + def _update_timer(self): + if not self._shutdown: + offset = self._timers.next_offset or 100000 # none pending; will be updated again when something new happens + self._loop_timer.start(offset) + else: + self._loop_timer.stop() + + def _on_loop_timer(self): + self._timers.service_timeouts() + + def notify(self): + self._notifier.send() + def connection_created(self, conn): with self._conn_set_lock: new_live_conns = self._live_conns.copy() @@ -205,6 +222,9 @@ def 
_loop_will_run(self, prepare): changed = True + # TODO: update to do connection management, timer updates through dedicaterd async 'notifier' callbacks + self._update_timer() + if changed: self._notifier.send() @@ -236,6 +256,12 @@ def handle_fork(cls): cls._libevloop._cleanup() cls._libevloop = None + @classmethod + def create_timer(self, timeout, callback): + timer = Timer(timeout, callback) + self._libevloop.add_timer(timer) + return timer + def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) @@ -361,7 +387,7 @@ def push(self, data): sabs = self.out_buffer_size if len(data) > sabs: chunks = [] - for i in xrange(0, len(data), sabs): + for i in range(0, len(data), sabs): chunks.append(data[i:i + sabs]) else: chunks = [data] diff --git a/cassandra/io/libevwrapper.c b/cassandra/io/libevwrapper.c index cbac83b277..1c52a96464 100644 --- a/cassandra/io/libevwrapper.c +++ b/cassandra/io/libevwrapper.c @@ -451,6 +451,129 @@ static PyTypeObject libevwrapper_PrepareType = { (initproc)Prepare_init, /* tp_init */ }; +typedef struct libevwrapper_Timer { + PyObject_HEAD + struct ev_timer timer; + struct libevwrapper_Loop *loop; + PyObject *callback; +} libevwrapper_Timer; + +static void +Timer_dealloc(libevwrapper_Timer *self) { + Py_XDECREF(self->loop); + Py_XDECREF(self->callback); + Py_TYPE(self)->tp_free((PyObject *)self); +} + +static void timer_callback(struct ev_loop *loop, ev_timer *watcher, int revents) { + libevwrapper_Timer *self = watcher->data; + + PyObject *result = NULL; + PyGILState_STATE gstate; + + gstate = PyGILState_Ensure(); + result = PyObject_CallFunction(self->callback, NULL); + if (!result) { + PyErr_WriteUnraisable(self->callback); + } + Py_XDECREF(result); + + PyGILState_Release(gstate); +} + +static int +Timer_init(libevwrapper_Timer *self, PyObject *args, PyObject *kwds) { + PyObject *callback; + PyObject *loop; + + if (!PyArg_ParseTuple(args, "OO", &loop, &callback)) { + return -1; + } + + if (loop) { + Py_INCREF(loop); 
+ self->loop = (libevwrapper_Loop *)loop; + } else { + return -1; + } + + if (callback) { + if (!PyCallable_Check(callback)) { + PyErr_SetString(PyExc_TypeError, "callback parameter must be callable"); + Py_XDECREF(loop); + return -1; + } + Py_INCREF(callback); + self->callback = callback; + } + ev_init(&self->timer, timer_callback); + self->timer.data = self; + return 0; +} + +static PyObject * +Timer_start(libevwrapper_Timer *self, PyObject *args) { + double timeout; + if (!PyArg_ParseTuple(args, "d", &timeout)) { + return NULL; + } + self->timer.repeat = fmax(timeout, 0.0); + ev_timer_again(self->loop->loop, &self->timer); + Py_RETURN_NONE; +} + +static PyObject * +Timer_stop(libevwrapper_Timer *self, PyObject *args) { + ev_timer_stop(self->loop->loop, &self->timer); + Py_RETURN_NONE; +} + +static PyMethodDef Timer_methods[] = { + {"start", (PyCFunction)Timer_start, METH_VARARGS, "Start the Timer watcher"}, + {"stop", (PyCFunction)Timer_stop, METH_NOARGS, "Stop the Timer watcher"}, + {NULL} /* Sentinal */ +}; + +static PyTypeObject libevwrapper_TimerType = { + PyVarObject_HEAD_INIT(NULL, 0) + "cassandra.io.libevwrapper.Timer", /*tp_name*/ + sizeof(libevwrapper_Timer), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + (destructor)Timer_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/ + "Timer objects", /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + Timer_methods, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)Timer_init, /* 
tp_init */ +}; + + static PyMethodDef module_methods[] = { {NULL} /* Sentinal */ }; @@ -500,6 +623,10 @@ initlibevwrapper(void) if (PyType_Ready(&libevwrapper_AsyncType) < 0) INITERROR; + libevwrapper_TimerType.tp_new = PyType_GenericNew; + if (PyType_Ready(&libevwrapper_TimerType) < 0) + INITERROR; + # if PY_MAJOR_VERSION >= 3 module = PyModule_Create(&moduledef); # else @@ -532,6 +659,10 @@ initlibevwrapper(void) if (PyModule_AddObject(module, "Async", (PyObject *)&libevwrapper_AsyncType) == -1) INITERROR; + Py_INCREF(&libevwrapper_TimerType); + if (PyModule_AddObject(module, "Timer", (PyObject *)&libevwrapper_TimerType) == -1) + INITERROR; + if (!PyEval_ThreadsInitialized()) { PyEval_InitThreads(); } From 109d6bb8abe02789e5b78e46af5557e13bca73d5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 30 Apr 2015 10:44:14 -0500 Subject: [PATCH 1375/3726] Update ResponseFutures to use reactor timers PYTHON-108 avoids sleep in Event.wait, which contributes to latency when using synchronous execution --- cassandra/cluster.py | 66 ++++++++++++++++-------------- cassandra/query.py | 6 +-- tests/unit/test_response_future.py | 22 +++++----- 3 files changed, 50 insertions(+), 44 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c5c83a730f..834b96367b 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1401,17 +1401,14 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False): trace details, the :attr:`~.Statement.trace` attribute will be left as :const:`None`. 
""" - if timeout is _NOT_SET: - timeout = self.default_timeout - if trace and not isinstance(query, Statement): raise TypeError( "The query argument must be an instance of a subclass of " "cassandra.query.Statement when trace=True") - future = self.execute_async(query, parameters, trace) + future = self.execute_async(query, parameters, trace, timeout) try: - result = future.result(timeout) + result = future.result() finally: if trace: try: @@ -1421,7 +1418,7 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False): return result - def execute_async(self, query, parameters=None, trace=False): + def execute_async(self, query, parameters=None, trace=False, timeout=_NOT_SET): """ Execute the given query and return a :class:`~.ResponseFuture` object which callbacks may be attached to for asynchronous response @@ -1458,11 +1455,14 @@ def execute_async(self, query, parameters=None, trace=False): ... log.exception("Operation failed:") """ - future = self._create_response_future(query, parameters, trace) + if timeout is _NOT_SET: + timeout = self.default_timeout + + future = self._create_response_future(query, parameters, trace, timeout) future.send_request() return future - def _create_response_future(self, query, parameters, trace): + def _create_response_future(self, query, parameters, trace, timeout): """ Returns the ResponseFuture before calling send_request() on it """ prepared_statement = None @@ -1513,7 +1513,7 @@ def _create_response_future(self, query, parameters, trace): message.tracing = True return ResponseFuture( - self, message, query, self.default_timeout, metrics=self._metrics, + self, message, query, timeout, metrics=self._metrics, prepared_statement=prepared_statement) def prepare(self, query): @@ -1543,10 +1543,10 @@ def prepare(self, query): Preparing the same query more than once will likely affect performance. 
""" message = PrepareMessage(query=query) - future = ResponseFuture(self, message, query=None) + future = ResponseFuture(self, message, query=None, timeout=self.default_timeout) try: future.send_request() - query_id, column_metadata = future.result(self.default_timeout) + query_id, column_metadata = future.result() except Exception: log.exception("Error preparing query:") raise @@ -1571,7 +1571,7 @@ def prepare_on_all_hosts(self, query, excluded_host): futures = [] for host in self._pools.keys(): if host != excluded_host and host.is_up: - future = ResponseFuture(self, PrepareMessage(query=query), None) + future = ResponseFuture(self, PrepareMessage(query=query), None, self.default_timeout) # we don't care about errors preparing against specific hosts, # since we can always prepare them as needed when the prepared @@ -1592,7 +1592,7 @@ def prepare_on_all_hosts(self, query, excluded_host): for host, future in futures: try: - future.result(self.default_timeout) + future.result() except Exception: log.exception("Error preparing query for host %s:", host) @@ -2579,13 +2579,14 @@ class ResponseFuture(object): _start_time = None _metrics = None _paging_state = None + _timer = None - def __init__(self, session, message, query, default_timeout=None, metrics=None, prepared_statement=None): + def __init__(self, session, message, query, timeout, metrics=None, prepared_statement=None): self.session = session self.row_factory = session.row_factory self.message = message self.query = query - self.default_timeout = default_timeout + self.timeout = timeout self._metrics = metrics self.prepared_statement = prepared_statement self._callback_lock = Lock() @@ -2596,6 +2597,17 @@ def __init__(self, session, message, query, default_timeout=None, metrics=None, self._errors = {} self._callbacks = [] self._errbacks = [] + self._start_timer() + + def _start_timer(self): + self._timer = self.session.cluster.connection_class.create_timer(self.timeout, self._on_timeout) + + def 
_cancel_timer(self): + if self._timer: + self._timer.cancel() + + def _on_timeout(self): + self._set_final_exception(OperationTimedOut()) def _make_query_plan(self): # convert the list/generator/etc to an iterator so that subsequent @@ -2684,6 +2696,7 @@ def start_fetching_next_page(self): self._event.clear() self._final_result = _NOT_SET self._final_exception = None + self._start_timer() self.send_request() def _reprepare(self, prepare_message): @@ -2888,6 +2901,7 @@ def _execute_after_prepare(self, response): "statement on host %s: %s" % (self._current_host, response))) def _set_final_result(self, response): + self._cancel_timer() if self._metrics is not None: self._metrics.request_timer.addValue(time.time() - self._start_time) @@ -2902,6 +2916,7 @@ def _set_final_result(self, response): fn(response, *args, **kwargs) def _set_final_exception(self, response): + self._cancel_timer() if self._metrics is not None: self._metrics.request_timer.addValue(time.time() - self._start_time) @@ -2976,27 +2991,18 @@ def result(self, timeout=_NOT_SET): ... 
log.exception("Operation failed:") """ - if timeout is _NOT_SET: - timeout = self.default_timeout + if timeout is not _NOT_SET: + # TODO: warn deprecated + pass + self._event.wait() if self._final_result is not _NOT_SET: if self._paging_state is None: return self._final_result else: return PagedResult(self, self._final_result, timeout) - elif self._final_exception: - raise self._final_exception else: - self._event.wait(timeout=timeout) - if self._final_result is not _NOT_SET: - if self._paging_state is None: - return self._final_result - else: - return PagedResult(self, self._final_result, timeout) - elif self._final_exception: - raise self._final_exception - else: - raise OperationTimedOut(errors=self._errors, last_host=self._current_host) + raise self._final_exception def get_query_trace(self, max_wait=None): """ @@ -3162,7 +3168,7 @@ def next(self): raise self.response_future.start_fetching_next_page() - result = self.response_future.result(self.timeout) + result = self.response_future.result() if self.response_future.has_more_pages: self.current_response = result.current_response else: diff --git a/cassandra/query.py b/cassandra/query.py index 7f7756b3fb..3a5e867f9d 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -838,14 +838,14 @@ def populate(self, max_wait=2.0): break def _execute(self, query, parameters, time_spent, max_wait): + timeout = (max_wait - time_spent) if max_wait is not None else None + future = self._session._create_response_future(query, parameters, trace=False, timeout=timeout) # in case the user switched the row factory, set it to namedtuple for this query - future = self._session._create_response_future(query, parameters, trace=False) future.row_factory = named_tuple_factory future.send_request() - timeout = (max_wait - time_spent) if max_wait is not None else None try: - return future.result(timeout=timeout) + return future.result() except OperationTimedOut: raise TraceUnavailable("Trace information was not available within %f 
seconds" % (max_wait,)) diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 027fe73214..986945ba4a 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -47,7 +47,7 @@ def make_session(self): def make_response_future(self, session): query = SimpleStatement("SELECT * FROM foo") message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - return ResponseFuture(session, message, query) + return ResponseFuture(session, message, query, 1) def make_mock_response(self, results): return Mock(spec=ResultMessage, kind=RESULT_KIND_ROWS, results=results, paging_state=None) @@ -122,7 +122,7 @@ def test_read_timeout_error_message(self): query.retry_policy.on_read_timeout.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query) + rf = ResponseFuture(session, message, query, 1) rf.send_request() result = Mock(spec=ReadTimeoutErrorMessage, info={}) @@ -137,7 +137,7 @@ def test_write_timeout_error_message(self): query.retry_policy.on_write_timeout.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query) + rf = ResponseFuture(session, message, query, 1) rf.send_request() result = Mock(spec=WriteTimeoutErrorMessage, info={}) @@ -151,7 +151,7 @@ def test_unavailable_error_message(self): query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query) + rf = ResponseFuture(session, message, query, 1) rf.send_request() result = Mock(spec=UnavailableErrorMessage, info={}) @@ -165,7 +165,7 @@ def test_retry_policy_says_ignore(self): query.retry_policy.on_unavailable.return_value = (RetryPolicy.IGNORE, None) message = QueryMessage(query=query, 
consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query) + rf = ResponseFuture(session, message, query, 1) rf.send_request() result = Mock(spec=UnavailableErrorMessage, info={}) @@ -184,7 +184,7 @@ def test_retry_policy_says_retry(self): connection = Mock(spec=Connection) pool.borrow_connection.return_value = (connection, 1) - rf = ResponseFuture(session, message, query) + rf = ResponseFuture(session, message, query, 1) rf.send_request() rf.session._pools.get.assert_called_once_with('ip1') @@ -279,7 +279,7 @@ def test_all_pools_shutdown(self): session._load_balancer.make_query_plan.return_value = ['ip1', 'ip2'] session._pools.get.return_value.is_shutdown = True - rf = ResponseFuture(session, Mock(), Mock()) + rf = ResponseFuture(session, Mock(), Mock(), 1) rf.send_request() self.assertRaises(NoHostAvailable, rf.result) @@ -354,7 +354,7 @@ def test_errback(self): query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query) + rf = ResponseFuture(session, message, query, 1) rf.send_request() rf.add_errback(self.assertIsInstance, Exception) @@ -401,7 +401,7 @@ def test_multiple_errbacks(self): query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query) + rf = ResponseFuture(session, message, query, 1) rf.send_request() callback = Mock() @@ -431,7 +431,7 @@ def test_add_callbacks(self): message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) # test errback - rf = ResponseFuture(session, message, query) + rf = ResponseFuture(session, message, query, 1) rf.send_request() rf.add_callbacks( @@ -443,7 +443,7 @@ def test_add_callbacks(self): self.assertRaises(Exception, rf.result) # test callback - rf = ResponseFuture(session, message, query) + rf = 
ResponseFuture(session, message, query, 1) rf.send_request() callback = Mock() From 6ea1504e72f2ba83c4ab9d114dba214d593c265b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 30 Apr 2015 11:16:18 -0500 Subject: [PATCH 1376/3726] Refactor redundant register_watcher[s] to Connection --- cassandra/connection.py | 24 +++++++++++++++++++----- cassandra/io/asyncorereactor.py | 12 ------------ cassandra/io/eventletreactor.py | 15 --------------- cassandra/io/geventreactor.py | 15 --------------- cassandra/io/libevreactor.py | 12 ------------ cassandra/io/twistedreactor.py | 21 --------------------- 6 files changed, 19 insertions(+), 80 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index f5ca774a0d..81ca7dbd52 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -39,7 +39,8 @@ QueryMessage, ResultMessage, decode_response, InvalidRequestException, SupportedMessage, AuthResponseMessage, AuthChallengeMessage, - AuthSuccessMessage, ProtocolException) + AuthSuccessMessage, ProtocolException, + RegisterMessage) from cassandra.util import OrderedDict @@ -372,11 +373,24 @@ def wait_for_responses(self, *msgs, **kwargs): self.defunct(exc) raise - def register_watcher(self, event_type, callback): - raise NotImplementedError() + def register_watcher(self, event_type, callback, register_timeout=None): + """ + Register a callback for a given event type. + """ + self._push_watchers[event_type].add(callback) + self.wait_for_response( + RegisterMessage(event_list=[event_type]), + timeout=register_timeout) - def register_watchers(self, type_callback_dict): - raise NotImplementedError() + def register_watchers(self, type_callback_dict, register_timeout=None): + """ + Register multiple callback/event type pairs, expressed as a dict. 
+ """ + for event_type, callback in type_callback_dict.items(): + self._push_watchers[event_type].add(callback) + self.wait_for_response( + RegisterMessage(event_list=type_callback_dict.keys()), + timeout=register_timeout) def control_conn_disposed(self): self.is_control_connection = False diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index eeb319bb65..ee089ae4fd 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -40,7 +40,6 @@ from cassandra.connection import (Connection, ConnectionShutdown, ConnectionException, NONBLOCKING, Timer, TimerManager) -from cassandra.protocol import RegisterMessage log = logging.getLogger(__name__) @@ -318,14 +317,3 @@ def writable(self): def readable(self): return self._readable or (self.is_control_connection and not (self.is_defunct or self.is_closed)) - - def register_watcher(self, event_type, callback, register_timeout=None): - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=[event_type]), timeout=register_timeout) - - def register_watchers(self, type_callback_dict, register_timeout=None): - for event_type, callback in type_callback_dict.items(): - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=type_callback_dict.keys()), timeout=register_timeout) diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index 670d0f1865..131cb336cc 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -28,9 +28,7 @@ from six.moves import xrange -from cassandra import OperationTimedOut from cassandra.connection import Connection, ConnectionShutdown -from cassandra.protocol import RegisterMessage log = logging.getLogger(__name__) @@ -165,16 +163,3 @@ def push(self, data): chunk_size = self.out_buffer_size for i in xrange(0, len(data), chunk_size): self._write_queue.put(data[i:i + chunk_size]) - - def register_watcher(self, 
event_type, callback, register_timeout=None): - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=[event_type]), - timeout=register_timeout) - - def register_watchers(self, type_callback_dict, register_timeout=None): - for event_type, callback in type_callback_dict.items(): - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=type_callback_dict.keys()), - timeout=register_timeout) diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 6e9af0da4d..f315ad0506 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -25,9 +25,7 @@ from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL -from cassandra import OperationTimedOut from cassandra.connection import Connection, ConnectionShutdown -from cassandra.protocol import RegisterMessage log = logging.getLogger(__name__) @@ -161,16 +159,3 @@ def push(self, data): chunk_size = self.out_buffer_size for i in xrange(0, len(data), chunk_size): self._write_queue.put(data[i:i + chunk_size]) - - def register_watcher(self, event_type, callback, register_timeout=None): - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=[event_type]), - timeout=register_timeout) - - def register_watchers(self, type_callback_dict, register_timeout=None): - for event_type, callback in type_callback_dict.items(): - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=type_callback_dict.keys()), - timeout=register_timeout) diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 174cfd859c..c4efbb0df5 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -24,7 +24,6 @@ from cassandra.connection import (Connection, ConnectionShutdown, NONBLOCKING, Timer, TimerManager) -from cassandra.protocol import RegisterMessage try: import cassandra.io.libevwrapper 
as libev except ImportError: @@ -395,14 +394,3 @@ def push(self, data): with self._deque_lock: self.deque.extend(chunks) self._libevloop.notify() - - def register_watcher(self, event_type, callback, register_timeout=None): - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=[event_type]), timeout=register_timeout) - - def register_watchers(self, type_callback_dict, register_timeout=None): - for event_type, callback in type_callback_dict.items(): - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=type_callback_dict.keys()), timeout=register_timeout) diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index ff81e5613f..d6534150c0 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -22,9 +22,7 @@ import weakref import atexit -from cassandra import OperationTimedOut from cassandra.connection import Connection, ConnectionShutdown -from cassandra.protocol import RegisterMessage log = logging.getLogger(__name__) @@ -220,22 +218,3 @@ def push(self, data): the event loop when it gets the chance. """ reactor.callFromThread(self.connector.transport.write, data) - - def register_watcher(self, event_type, callback, register_timeout=None): - """ - Register a callback for a given event type. - """ - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=[event_type]), - timeout=register_timeout) - - def register_watchers(self, type_callback_dict, register_timeout=None): - """ - Register multiple callback/event type pairs, expressed as a dict. 
- """ - for event_type, callback in type_callback_dict.items(): - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=type_callback_dict.keys()), - timeout=register_timeout) From 4d653576953fa2dffd901a009116730009974706 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 30 Apr 2015 15:09:13 -0500 Subject: [PATCH 1377/3726] Gevent timer implementation --- cassandra/connection.py | 6 +++++- cassandra/io/geventreactor.py | 35 ++++++++++++++++++++++++++++++++--- 2 files changed, 37 insertions(+), 4 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 81ca7dbd52..c21133fb74 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -925,6 +925,10 @@ def add_timer(self, timer): self._timers.put_nowait(timer) def service_timeouts(self): + """ + run callbacks on all expired timers + :return: next end time, or None + """ now = time.time() while not self._timers.empty(): timer = self._timers.get_nowait() @@ -932,7 +936,7 @@ def service_timeouts(self): timer.on_timeout() else: self._timers.put_nowait(timer) - break + return timer.end @property def next_timeout(self): diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index f315ad0506..076b13d99c 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -20,12 +20,13 @@ from functools import partial import logging import os +import time -from six.moves import xrange +from six.moves import range from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL -from cassandra.connection import Connection, ConnectionShutdown +from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager log = logging.getLogger(__name__) @@ -48,6 +49,34 @@ class GeventConnection(Connection): _write_watcher = None _socket = None + _timers = None + _timeout_watcher = None + _new_timer = None + + @classmethod + def initialize_reactor(cls): + if not cls._timers: + cls._timers = TimerManager() 
+ cls._timeout_watcher = gevent.spawn(cls.service_timeouts) + cls._new_timer = Event() + + @classmethod + def create_timer(cls, timeout, callback): + timer = Timer(timeout, callback) + cls._timers.add_timer(timer) + cls._new_timer.set() + return timer + + @classmethod + def service_timeouts(cls): + timer_manager = cls._timers + timer_event = cls._new_timer + while True: + next_end = timer_manager.service_timeouts() + sleep_time = next_end - time.time() if next_end else 10000 + timer_event.wait(sleep_time) + timer_event.clear() + def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) @@ -157,5 +186,5 @@ def handle_read(self): def push(self, data): chunk_size = self.out_buffer_size - for i in xrange(0, len(data), chunk_size): + for i in range(0, len(data), chunk_size): self._write_queue.put(data[i:i + chunk_size]) From e3fdffb55cdb5176d22f5962594d30fee7616237 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 30 Apr 2015 16:08:32 -0500 Subject: [PATCH 1378/3726] Catch and log exceptions during timeout servicing --- cassandra/connection.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index c21133fb74..856cec31fd 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -933,7 +933,10 @@ def service_timeouts(self): while not self._timers.empty(): timer = self._timers.get_nowait() if timer.end < now: - timer.on_timeout() + try: + timer.on_timeout() + except Exception: + log.exception("Exception while servicing timeout callback: ") else: self._timers.put_nowait(timer) return timer.end From 87028134b1824ad767d511f6503c86a029cd7cb1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 30 Apr 2015 16:09:51 -0500 Subject: [PATCH 1379/3726] refactor connected_event to Connection init --- cassandra/connection.py | 1 + cassandra/io/asyncorereactor.py | 2 -- cassandra/io/geventreactor.py | 7 +++---- cassandra/io/libevreactor.py | 2 -- 
cassandra/io/twistedreactor.py | 1 - 5 files changed, 4 insertions(+), 9 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 856cec31fd..c1cb08e176 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -224,6 +224,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self._full_header_length = self._header_length + 4 self.lock = RLock() + self.connected_event = Event() @classmethod def initialize_reactor(self): diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index ee089ae4fd..8010b19237 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -158,8 +158,6 @@ def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) asyncore.dispatcher.__init__(self) - self.connected_event = Event() - self._callbacks = {} self.deque = deque() self.deque_lock = Lock() diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 076b13d99c..83abd5d81b 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -13,7 +13,7 @@ # limitations under the License. 
import gevent from gevent import select, socket, ssl -from gevent.event import Event +import gevent.event from gevent.queue import Queue from collections import defaultdict @@ -58,7 +58,7 @@ def initialize_reactor(cls): if not cls._timers: cls._timers = TimerManager() cls._timeout_watcher = gevent.spawn(cls.service_timeouts) - cls._new_timer = Event() + cls._new_timer = gevent.event.Event() @classmethod def create_timer(cls, timeout, callback): @@ -73,14 +73,13 @@ def service_timeouts(cls): timer_event = cls._new_timer while True: next_end = timer_manager.service_timeouts() - sleep_time = next_end - time.time() if next_end else 10000 + sleep_time = max(next_end - time.time(), 0) if next_end else 10000 timer_event.wait(sleep_time) timer_event.clear() def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) - self.connected_event = Event() self._write_queue = Queue() self._callbacks = {} diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index c4efbb0df5..150fab21b5 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -264,8 +264,6 @@ def create_timer(self, timeout, callback): def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) - self.connected_event = Event() - self._callbacks = {} self.deque = deque() self._deque_lock = Lock() diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index d6534150c0..1077cec0e8 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -157,7 +157,6 @@ def __init__(self, *args, **kwargs): """ Connection.__init__(self, *args, **kwargs) - self.connected_event = Event() self.is_closed = True self.connector = None From 3489813bf7e66d39f51199c2c755e990a7d5c197 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 1 May 2015 09:32:24 -0500 Subject: [PATCH 1380/3726] Eventlet timer using eventlet.event.NOT_USED. 
This works with the current implementation, but there is no guarantee about the stability of this mechanism in eventlet events. Probably need to synchronize Event management. --- cassandra/io/eventletreactor.py | 33 +++++++++++++++++++++++++++++---- 1 file changed, 29 insertions(+), 4 deletions(-) diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index 131cb336cc..709f3a44af 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -18,17 +18,18 @@ from collections import defaultdict from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL -import eventlet +import eventlet.event from eventlet.green import select, socket from eventlet.queue import Queue +from eventlet.timeout import Timeout from functools import partial import logging import os -from threading import Event +import time from six.moves import xrange -from cassandra.connection import Connection, ConnectionShutdown +from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager log = logging.getLogger(__name__) @@ -51,14 +52,38 @@ class EventletConnection(Connection): _write_watcher = None _socket = None + _timers = None + _timeout_watcher = None + _new_timer = None + @classmethod def initialize_reactor(cls): eventlet.monkey_patch() + if not cls._timers: + cls._timers = TimerManager() + cls._timeout_watcher = eventlet.spawn(cls.service_timeouts) + cls._new_timer = eventlet.event.Event() + + @classmethod + def create_timer(cls, timeout, callback): + timer = Timer(timeout, callback) + cls._timers.add_timer(timer) + cls._new_timer.send(eventlet.event.NOT_USED) + return timer + + @classmethod + def service_timeouts(cls): + timer_manager = cls._timers + while True: + next_end = timer_manager.service_timeouts() + sleep_time = max(next_end - time.time(), 0) if next_end else 10000 + with Timeout(sleep_time, False): + cls._new_timer.wait() + cls._new_timer = eventlet.event.Event() def __init__(self, *args, **kwargs): 
Connection.__init__(self, *args, **kwargs) - self.connected_event = Event() self._write_queue = Queue() self._callbacks = {} From c44365c8b204edb778e2046d31e9f2ff193bb8a9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 1 May 2015 10:42:06 -0500 Subject: [PATCH 1381/3726] Use threading.Event for eventlet timeouts Removes Eventlet Events, which are more difficult to use --- cassandra/io/eventletreactor.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index 709f3a44af..8be3a07353 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -18,13 +18,13 @@ from collections import defaultdict from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL -import eventlet.event +import eventlet from eventlet.green import select, socket from eventlet.queue import Queue -from eventlet.timeout import Timeout from functools import partial import logging import os +from threading import Event import time from six.moves import xrange @@ -62,13 +62,13 @@ def initialize_reactor(cls): if not cls._timers: cls._timers = TimerManager() cls._timeout_watcher = eventlet.spawn(cls.service_timeouts) - cls._new_timer = eventlet.event.Event() + cls._new_timer = Event() @classmethod def create_timer(cls, timeout, callback): timer = Timer(timeout, callback) cls._timers.add_timer(timer) - cls._new_timer.send(eventlet.event.NOT_USED) + cls._new_timer.set() return timer @classmethod @@ -77,9 +77,8 @@ def service_timeouts(cls): while True: next_end = timer_manager.service_timeouts() sleep_time = max(next_end - time.time(), 0) if next_end else 10000 - with Timeout(sleep_time, False): - cls._new_timer.wait() - cls._new_timer = eventlet.event.Event() + cls._new_timer.wait(sleep_time) + cls._new_timer.clear() def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) From 6ffca60cd1291277fd05967b9dbeecd764b9c615 Mon Sep 17 00:00:00 2001 From: Kishan 
Karunaratne Date: Fri, 1 May 2015 16:29:02 -0700 Subject: [PATCH 1382/3726] test for python-206 connect timeouts --- tests/integration/standard/test_cluster.py | 27 +++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 0fcabd104e..146859ba81 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -30,7 +30,7 @@ WhiteListRoundRobinPolicy) from cassandra.query import SimpleStatement, TraceUnavailable -from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions +from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, get_node from tests.integration.util import assert_quiescent_pool_state @@ -40,6 +40,31 @@ def setup_module(): class ClusterTests(unittest.TestCase): + def test_raise_error_on_control_connection_timeout(self): + """ + Test for initial control connection timeout + + test_raise_error_on_control_connection_timeout tests that the driver times out after the set initial connection + timeout. It first pauses node1, essentially making it unreachable. It then attempts to create a Cluster object + via connecting to node1 with a timeout of 1 second, and ensures that a NoHostAvailable is raised, along with + an OperationTimedOut for 1 second. + + @expected_errors NoHostAvailable When node1 is paused, and a connection attempt is made. + @since 2.6.0 + @jira_ticket PYTHON-206 + @expected_result NoHostAvailable exception should be raised after 1 second. 
+ + @test_category connection + """ + + get_node(1).pause() + cluster = Cluster(contact_points=['127.0.0.1'], protocol_version=PROTOCOL_VERSION, connect_timeout=1) + + with self.assertRaisesRegexp(NoHostAvailable, "OperationTimedOut\('errors=Timed out creating connection \(1 seconds\)"): + cluster.connect() + + get_node(1).resume() + def test_basic(self): """ Test basic connection and usage From afe2e5ae8eabc101ae044869b21464d2733a4a14 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 4 May 2015 11:00:03 -0500 Subject: [PATCH 1383/3726] Twisted timer implementation --- cassandra/io/twistedreactor.py | 37 ++++++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index 1077cec0e8..12a8f5303f 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -15,14 +15,15 @@ Module that implements an event loop based on twisted ( https://twistedmatrix.com ). """ -from twisted.internet import reactor, protocol -from threading import Event, Thread, Lock +import atexit from functools import partial import logging +from threading import Event, Thread, Lock +import time +from twisted.internet import reactor, protocol import weakref -import atexit -from cassandra.connection import Connection, ConnectionShutdown +from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager log = logging.getLogger(__name__) @@ -107,9 +108,12 @@ class TwistedLoop(object): _lock = None _thread = None + _timeout_task = None + _timeout = None def __init__(self): self._lock = Lock() + self._timers = TimerManager() def maybe_start(self): with self._lock: @@ -131,6 +135,25 @@ def _cleanup(self): "Cluster.shutdown() to avoid this.") log.debug("Event loop thread was joined") + def add_timer(self, timer): + self._timers.add_timer(timer) + reactor.callFromThread(self._schedule_timeout, timer.end) + + def _schedule_timeout(self, next_timeout): + if 
next_timeout: + delay = max(next_timeout - time.time(), 0) + if self._timeout_task and self._timeout_task.active(): + if next_timeout < self._timeout: + self._timeout_task.reset(delay) + self._timeout = next_timeout + else: + self._timeout_task = reactor.callLater(delay, self._on_loop_timer) + self._timeout = next_timeout + + def _on_loop_timer(self): + self._timers.service_timeouts() + self._schedule_timeout(self._timers.next_timeout) + class TwistedConnection(Connection): """ @@ -146,6 +169,12 @@ def initialize_reactor(cls): if not cls._loop: cls._loop = TwistedLoop() + @classmethod + def create_timer(cls, timeout, callback): + timer = Timer(timeout, callback) + cls._loop.add_timer(timer) + return timer + def __init__(self, *args, **kwargs): """ Initialization method. From 7f5e9df1b6f27629e280e0d9947950565d55af90 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 4 May 2015 11:00:39 -0500 Subject: [PATCH 1384/3726] self-->cls in create_timer classmethods --- cassandra/io/asyncorereactor.py | 4 ++-- cassandra/io/libevreactor.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 8010b19237..52972b9e45 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -149,9 +149,9 @@ def handle_fork(cls): cls._loop = None @classmethod - def create_timer(self, timeout, callback): + def create_timer(cls, timeout, callback): timer = Timer(timeout, callback) - self._loop.add_timer(timer) + cls._loop.add_timer(timer) return timer def __init__(self, *args, **kwargs): diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 150fab21b5..3559c0bf59 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -256,9 +256,9 @@ def handle_fork(cls): cls._libevloop = None @classmethod - def create_timer(self, timeout, callback): + def create_timer(cls, timeout, callback): timer = Timer(timeout, callback) - 
self._libevloop.add_timer(timer) + cls._libevloop.add_timer(timer) return timer def __init__(self, *args, **kwargs): From 67e19aad0b7ce13764bb1c7460366392c20fef1f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 4 May 2015 12:01:16 -0500 Subject: [PATCH 1385/3726] Refactor _callbacks and _push_watchers init to Connection --- cassandra/connection.py | 1 + cassandra/io/asyncorereactor.py | 1 - cassandra/io/eventletreactor.py | 3 --- cassandra/io/geventreactor.py | 3 --- cassandra/io/libevreactor.py | 1 - cassandra/io/twistedreactor.py | 1 - tests/unit/test_connection.py | 3 --- 7 files changed, 1 insertion(+), 12 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index c1cb08e176..57f5f96ad8 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -194,6 +194,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self.is_control_connection = is_control_connection self.user_type_map = user_type_map self._push_watchers = defaultdict(set) + self._callbacks = {} self._iobuf = io.BytesIO() if protocol_version >= 3: self._header_unpack = v3_header_unpack diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 52972b9e45..b815d20aed 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -158,7 +158,6 @@ def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) asyncore.dispatcher.__init__(self) - self._callbacks = {} self.deque = deque() self.deque_lock = Lock() diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index 8be3a07353..5db0816c88 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -85,9 +85,6 @@ def __init__(self, *args, **kwargs): self._write_queue = Queue() - self._callbacks = {} - self._push_watchers = defaultdict(set) - sockerr = None addresses = socket.getaddrinfo( self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM diff --git 
a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 83abd5d81b..84285957bb 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -82,9 +82,6 @@ def __init__(self, *args, **kwargs): self._write_queue = Queue() - self._callbacks = {} - self._push_watchers = defaultdict(set) - sockerr = None addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM) for (af, socktype, proto, canonname, sockaddr) in addresses: diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 3559c0bf59..31c9cd3812 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -264,7 +264,6 @@ def create_timer(cls, timeout, callback): def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) - self._callbacks = {} self.deque = deque() self._deque_lock = Lock() diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index 12a8f5303f..9b835fe137 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -189,7 +189,6 @@ def __init__(self, *args, **kwargs): self.is_closed = True self.connector = None - self._callbacks = {} reactor.callFromThread(self.add_connection) self._loop.maybe_start() diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 7fc4ed4e5a..a779e1338c 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -261,10 +261,7 @@ def test_not_implemented(self): Ensure the following methods throw NIE's. If not, come back and test them. 
""" c = self.make_connection() - self.assertRaises(NotImplementedError, c.close) - self.assertRaises(NotImplementedError, c.register_watcher, None, None) - self.assertRaises(NotImplementedError, c.register_watchers, None) def test_set_keyspace_blocking(self): c = self.make_connection() From d7c68f9e18178c9d1f07e3c49348b27f94d1afe8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 4 May 2015 13:23:29 -0500 Subject: [PATCH 1386/3726] Allow ResponseFutures without a timeout --- cassandra/cluster.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 834b96367b..09ce11f657 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2600,7 +2600,8 @@ def __init__(self, session, message, query, timeout, metrics=None, prepared_stat self._start_timer() def _start_timer(self): - self._timer = self.session.cluster.connection_class.create_timer(self.timeout, self._on_timeout) + if self.timeout is not None: + self._timer = self.session.cluster.connection_class.create_timer(self.timeout, self._on_timeout) def _cancel_timer(self): if self._timer: From 4b2f046452826119b03ed17b110bc49e6af30e0f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 4 May 2015 13:24:21 -0500 Subject: [PATCH 1387/3726] Remove deprecated, unused import from cqlengine getting started --- docs/object_mapper.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/object_mapper.rst b/docs/object_mapper.rst index 26d78a0964..4e38994064 100644 --- a/docs/object_mapper.rst +++ b/docs/object_mapper.rst @@ -48,7 +48,7 @@ Getting Started from cassandra.cqlengine import columns from cassandra.cqlengine import connection from datetime import datetime - from cassandra.cqlengine.management import create_keyspace, sync_table + from cassandra.cqlengine.management import sync_table from cassandra.cqlengine.models import Model #first, define a model From 0356f85c06570ca755ac9ae3f2e83c7db8b84888 Mon Sep 17 00:00:00 2001 
From: Kishan Karunaratne Date: Mon, 4 May 2015 15:02:59 -0700 Subject: [PATCH 1388/3726] tests for PYTHON-246 --- .../cqlengine/model/test_model_io.py | 101 +++++++++++++++++- .../integration/cqlengine/model/test_udts.py | 72 +++++++++---- 2 files changed, 151 insertions(+), 22 deletions(-) diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index b9f3936f19..1af9a8af0f 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from uuid import uuid4 +from uuid import uuid4, UUID import random -from datetime import date +from datetime import date, datetime +from decimal import Decimal from operator import itemgetter from cassandra.cqlengine import CQLEngineException from tests.integration.cqlengine.base import BaseCassEngTestCase @@ -124,6 +125,102 @@ def test_a_sensical_error_is_raised_if_you_try_to_create_a_table_twice(self): sync_table(TestModel) sync_table(TestModel) + def test_can_insert_model_with_all_column_types(self): + """ + Test for inserting all column types into a Model + + test_can_insert_model_with_all_column_types tests that each cqlengine column type can be inserted into a Model. + It first creates a Model that has each cqlengine column type. It then creates a Model instance where all the fields + have corresponding data, which performs the insert into the Cassandra table. + Finally, it verifies that each column read from the Model from Cassandra is the same as the input parameters. + + @since 2.6.0 + @jira_ticket PYTHON-246 + @expected_result The Model is inserted with each column type, and the resulting read yields proper data for each column. 
+ + @test_category data_types:primitive + """ + + class AllDatatypesModel(Model): + id = columns.Integer(primary_key=True) + a = columns.Ascii() + b = columns.BigInt() + c = columns.Blob() + d = columns.Boolean() + e = columns.Date() + f = columns.DateTime() + g = columns.Decimal() + h = columns.Double() + i = columns.Float(double_precision=False) + j = columns.Inet() + k = columns.Integer() + l = columns.Text() + m = columns.TimeUUID() + n = columns.UUID() + o = columns.VarInt() + + sync_table(AllDatatypesModel) + + input = ['ascii', 2 ** 63 - 1, bytearray(b'hello world'), True, date(1970, 1, 1), + datetime.utcfromtimestamp(872835240), Decimal('12.3E+7'), 2.39, + 3.4028234663852886e+38, '123.123.123.123', 2147483647, 'text', + UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), + int(str(2147483647) + '000')] + + AllDatatypesModel.create(id=0, a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True, e=date(1970, 1, 1), + f=datetime.utcfromtimestamp(872835240), g=Decimal('12.3E+7'), h=2.39, + i=3.4028234663852886e+38, j='123.123.123.123', k=2147483647, l='text', + m=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), n=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), + o=int(str(2147483647) + '000')) + + self.assertEqual(1, AllDatatypesModel.objects.count()) + output = AllDatatypesModel.objects().first() + + for i, i_char in enumerate(range(ord('a'), ord('a') + 15)): + self.assertEqual(input[i], output[chr(i_char)]) + + def test_can_insert_double_and_float(self): + """ + Test for inserting single-precision and double-precision values into a Float and Double columns + + test_can_insert_double_and_float tests a Float can only hold a single-precision value, unless + "double_precision" attribute is specified as True or is unspecified. This test first tests that an AttributeError + is raised when attempting to input a double-precision value into a single-precision Float. 
It then verifies that + Double, Float(double_precision=True) and Float() can hold double-precision values by default. It also verifies that + columns.Float(double_precision=False) can hold a single-precision value, and a Double can hold a single-precision value. + + @since 2.6.0 + @jira_ticket PYTHON-246 + @expected_result Each floating point column type is able to hold their respective precision values. + + @test_category data_types:primitive + """ + + class FloatingPointModel(Model): + id = columns.Integer(primary_key=True) + a = columns.Float(double_precision=False) + b = columns.Float(double_precision=True) + c = columns.Float() + d = columns.Double() + + sync_table(FloatingPointModel) + + FloatingPointModel.create(id=0, a=2.39) + output = FloatingPointModel.objects().first() + self.assertEqual(2.390000104904175, output.a) + + FloatingPointModel.create(id=0, a=3.4028234663852886e+38, b=2.39, c=2.39, d=2.39) + output = FloatingPointModel.objects().first() + + self.assertEqual(3.4028234663852886e+38, output.a) + self.assertEqual(2.39, output.b) + self.assertEqual(2.39, output.c) + self.assertEqual(2.39, output.d) + + FloatingPointModel.create(id=0, d=3.4028234663852886e+38) + output = FloatingPointModel.objects().first() + self.assertEqual(3.4028234663852886e+38, output.d) + class TestMultiKeyModel(Model): diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 89a55d1730..3299fda310 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -187,7 +187,22 @@ class DepthModel(Model): self.assertEqual(udts[2], output.v_2) self.assertEqual(udts[3], output.v_3) - def test_can_insert_udts_with_nulls(self): + def test_can_insert_udts_with_nones(self): + """ + Test for inserting all column types as empty into a UserType as None's + + test_can_insert_udts_with_nones tests that each cqlengine column type can be inserted into a UserType as None's. 
+ It first creates a UserType that has each cqlengine column type, and a corresponding table/Model. It then creates + a UserType instance where all the fields are None's and inserts the UserType as an instance of the Model. Finally, + it verifies that each column read from the UserType from Cassandra is None. + + @since 2.5.0 + @jira_ticket PYTHON-251 + @expected_result The UserType is inserted with each column type, and the resulting read yields None's for each column. + + @test_category data_types:udt + """ + class AllDatatypes(UserType): a = columns.Ascii() b = columns.BigInt() @@ -196,13 +211,14 @@ class AllDatatypes(UserType): e = columns.Date() f = columns.DateTime() g = columns.Decimal() - h = columns.Float(double_precision=False) - i = columns.Inet() - j = columns.Integer() - k = columns.Text() - l = columns.TimeUUID() - m = columns.UUID() - n = columns.VarInt() + h = columns.Double() + i = columns.Float(double_precision=False) + j = columns.Inet() + k = columns.Integer() + l = columns.Text() + m = columns.TimeUUID() + n = columns.UUID() + o = columns.VarInt() class AllDatatypesModel(Model): id = columns.Integer(primary_key=True) @@ -219,6 +235,21 @@ class AllDatatypesModel(Model): self.assertEqual(input, output) def test_can_insert_udts_with_all_datatypes(self): + """ + Test for inserting all column types into a UserType + + test_can_insert_udts_with_all_datatypes tests that each cqlengine column type can be inserted into a UserType. + It first creates a UserType that has each cqlengine column type, and a corresponding table/Model. It then creates + a UserType instance where all the fields have corresponding data, and inserts the UserType as an instance of the Model. + Finally, it verifies that each column read from the UserType from Cassandra is the same as the input parameters. + + @since 2.5.0 + @jira_ticket PYTHON-251 + @expected_result The UserType is inserted with each column type, and the resulting read yields proper data for each column. 
+ + @test_category data_types:udt + """ + class AllDatatypes(UserType): a = columns.Ascii() b = columns.BigInt() @@ -228,12 +259,13 @@ class AllDatatypes(UserType): f = columns.DateTime() g = columns.Decimal() h = columns.Double() - i = columns.Inet() - j = columns.Integer() - k = columns.Text() - l = columns.TimeUUID() - m = columns.UUID() - n = columns.VarInt() + i = columns.Float(double_precision=False) + j = columns.Inet() + k = columns.Integer() + l = columns.Text() + m = columns.TimeUUID() + n = columns.UUID() + o = columns.VarInt() class AllDatatypesModel(Model): id = columns.Integer(primary_key=True) @@ -242,14 +274,14 @@ class AllDatatypesModel(Model): sync_table(AllDatatypesModel) input = AllDatatypes(a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True, e=date(1970, 1, 1), - f=datetime.utcfromtimestamp(872835240), - g=Decimal('12.3E+7'), h=3.4028234663852886e+38, i='123.123.123.123', j=2147483647, - k='text', l= UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), - m=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), n=int(str(2147483647) + '000')) - alldata = AllDatatypesModel.create(id=0, data=input) + f=datetime.utcfromtimestamp(872835240), g=Decimal('12.3E+7'), h=2.39, + i=3.4028234663852886e+38, j='123.123.123.123', k=2147483647, l='text', + m=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), n=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), + o=int(str(2147483647) + '000')) + AllDatatypesModel.create(id=0, data=input) self.assertEqual(1, AllDatatypesModel.objects.count()) output = AllDatatypesModel.objects().first().data - for i in range(ord('a'), ord('a') + 14): + for i in range(ord('a'), ord('a') + 15): self.assertEqual(input[chr(i)], output[chr(i)]) From 7cb178ff0190b785369747d42604eee64ad6bbaf Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 5 May 2015 17:24:40 -0500 Subject: [PATCH 1389/3726] Adding tests for PYTHON-235 --- tests/integration/standard/test_query.py | 40 +++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff 
--git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 780f307891..45cdcaf880 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import os - +import socket from cassandra.concurrent import execute_concurrent @@ -89,6 +89,44 @@ def test_trace_ignores_row_factory(self): cluster.shutdown() + def test_trace_client_ip(self): + """ ++ Test to validate that client trace contains client ip information. ++ ++ creates a simple query and ensures that the client trace information is present. This will + only be the case if the CQL version is 4 or greater./ + ++ ++ @since 3.0 ++ @jira_ticket PYTHON-235 ++ @expected_result client address should be present in CQL > 4, otherwise should be none. ++ ++ @test_category trace ++ """ + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest( + "Protocol 4+ is required for client ip tracing, currently testing against %r" + % (PROTOCOL_VERSION,)) + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + session = cluster.connect() + + query = "SELECT * FROM system.local" + statement = SimpleStatement(query) + session.execute(statement, trace=True) + + # Fetch the client_ip from the trace. + client_ip=statement.trace.client + # Ensure that ip is set for CQL >4 + self.assertIsNotNone(client_ip,"Client IP was not set in trace with CQL >=4.0") + # TODO we might want validate that client_ip actually matches our local ip rather than just validate that it + # is a valid ip. 
+ try: + socket.inet_aton(client_ip) + except socket.error: + self.fail("Client IP retrieved from trace was not valid :{0}".format(client_ip)) + + cluster.shutdown() + class PreparedStatementTests(unittest.TestCase): From 60f59ce7f29f4d23fba974b4fb022adb3acf8465 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 6 May 2015 10:50:05 -0500 Subject: [PATCH 1390/3726] Allow canceled timers to finish ahead of end time. --- cassandra/connection.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 57f5f96ad8..54641a3ac5 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -911,8 +911,11 @@ def cancel(self): self.callback = self._noop self.cancelled = True - def on_timeout(self): - self.callback() + def finish(self, time_now): + if self.callback is self._noop or time_now >= self.end: + self.callback() + return True + return False def _noop(self): pass @@ -934,14 +937,12 @@ def service_timeouts(self): now = time.time() while not self._timers.empty(): timer = self._timers.get_nowait() - if timer.end < now: - try: - timer.on_timeout() - except Exception: - log.exception("Exception while servicing timeout callback: ") - else: - self._timers.put_nowait(timer) - return timer.end + try: + if not timer.finish(now): + self._timers.put_nowait(timer) + return timer.end + except Exception: + log.exception("Exception while servicing timeout callback: ") @property def next_timeout(self): From c17e6e45e3d5b18961f59ccccd38ce71eee2cf0a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 6 May 2015 10:52:04 -0500 Subject: [PATCH 1391/3726] libev: service timeouts before updating loop timer. Also, never start a timer with zero repeat (this causes it to stop, rather than timeout immediately). 
--- cassandra/io/libevreactor.py | 1 + cassandra/io/libevwrapper.c | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 31c9cd3812..e6abb76b41 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -148,6 +148,7 @@ def add_timer(self, timer): def _update_timer(self): if not self._shutdown: + self._timers.service_timeouts() offset = self._timers.next_offset or 100000 # none pending; will be updated again when something new happens self._loop_timer.start(offset) else: diff --git a/cassandra/io/libevwrapper.c b/cassandra/io/libevwrapper.c index 1c52a96464..99e1df30f7 100644 --- a/cassandra/io/libevwrapper.c +++ b/cassandra/io/libevwrapper.c @@ -517,7 +517,9 @@ Timer_start(libevwrapper_Timer *self, PyObject *args) { if (!PyArg_ParseTuple(args, "d", &timeout)) { return NULL; } - self->timer.repeat = fmax(timeout, 0.0); + /* some tiny non-zero number to avoid zero, and + make it run immediately for negative timeouts */ + self->timer.repeat = fmax(timeout, 0.000000001); ev_timer_again(self->loop->loop, &self->timer); Py_RETURN_NONE; } From bca2a69392175a1b15f2a8c32bf1fc6c67f3d758 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 6 May 2015 13:07:06 -0500 Subject: [PATCH 1392/3726] Fixing test for PYTHON-235 --- tests/integration/standard/test_query.py | 57 +++++++++++++----------- 1 file changed, 30 insertions(+), 27 deletions(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 45cdcaf880..bb1cafa776 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -27,7 +27,7 @@ from cassandra.cluster import Cluster from cassandra.policies import HostDistance -from tests.integration import use_singledc, PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions def setup_module(): @@ -89,42 +89,45 @@ def 
test_trace_ignores_row_factory(self): cluster.shutdown() - def test_trace_client_ip(self): + def test_client_ip_in_trace(self): """ -+ Test to validate that client trace contains client ip information. -+ -+ creates a simple query and ensures that the client trace information is present. This will - only be the case if the CQL version is 4 or greater./ + Test to validate that client trace contains client ip information. -+ -+ @since 3.0 -+ @jira_ticket PYTHON-235 -+ @expected_result client address should be present in CQL > 4, otherwise should be none. -+ -+ @test_category trace + creates a simple query and ensures that the client trace information is present. This will + only be the case if the c* version is 3.0 or greater + + + @since 3.0 + @jira_ticket PYTHON-235 + @expected_result client address should be present in C* > 3, otherwise should be none. + + @test_category tracing + """ + #The current version on the trunk doesn't have the version set to 3.0 yet. + #For now we will use the protocol version. Once they update the version on C* trunk + #we can use the C*. See below + #self._cass_version, self._cql_version = get_server_versions() + #if self._cass_version < (3, 0): + # raise unittest.SkipTest("Client IP was not present in trace until C* 3.0") if PROTOCOL_VERSION < 4: - raise unittest.SkipTest( - "Protocol 4+ is required for client ip tracing, currently testing against %r" - % (PROTOCOL_VERSION,)) + raise unittest.SkipTest( + "Protocol 4+ is required for client ip tracing, currently testing against %r" + % (PROTOCOL_VERSION,)) + cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() query = "SELECT * FROM system.local" statement = SimpleStatement(query) - session.execute(statement, trace=True) - + response_future = session.execute_async(statement, trace=True) + response_future.result(10.0) + current_host = response_future._current_host.address # Fetch the client_ip from the trace. 
- client_ip=statement.trace.client - # Ensure that ip is set for CQL >4 - self.assertIsNotNone(client_ip,"Client IP was not set in trace with CQL >=4.0") - # TODO we might want validate that client_ip actually matches our local ip rather than just validate that it - # is a valid ip. - try: - socket.inet_aton(client_ip) - except socket.error: - self.fail("Client IP retrieved from trace was not valid :{0}".format(client_ip)) - + trace = response_future.get_query_trace(2.0) + client_ip = trace.client + # Ensure that ip is set for c* >3 + self.assertIsNotNone(client_ip,"Client IP was not set in trace with C* > 3.0") + self.assertEqual(client_ip,current_host,"Client IP from trace did not match the expected value") cluster.shutdown() From 9d8c50bb980f3611cb5e4705adbb3e036e48ef11 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 6 May 2015 14:53:07 -0500 Subject: [PATCH 1393/3726] Deprecate ResponseFuture.result timeout --- cassandra/cluster.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 09ce11f657..573ba3db64 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -27,6 +27,7 @@ import sys import time from threading import Lock, RLock, Thread, Event +import warnings import six from six.moves import range @@ -71,6 +72,7 @@ BatchStatement, bind_params, QueryTrace, Statement, named_tuple_factory, dict_factory, FETCH_SIZE_UNSET) + def _is_eventlet_monkey_patched(): if 'eventlet.patcher' not in sys.modules: return False @@ -1265,8 +1267,7 @@ class Session(object): """ A default timeout, measured in seconds, for queries executed through :meth:`.execute()` or :meth:`.execute_async()`. This default may be - overridden with the `timeout` parameter for either of those methods - or the `timeout` parameter for :meth:`.ResponseFuture.result()`. + overridden with the `timeout` parameter for either of those methods. 
Setting this to :const:`None` will cause no timeouts to be set by default. @@ -2961,6 +2962,11 @@ def result(self, timeout=_NOT_SET): encountered. If the final result or error has not been set yet, this method will block until that time. + .. versionchanged:: 2.6.0 + + **`timeout` is deprecated. Use timeout in the Session execute functions instead. + The following description applies to deprecated behavior:** + You may set a timeout (in seconds) with the `timeout` parameter. By default, the :attr:`~.default_timeout` for the :class:`.Session` this was created through will be used for the timeout on this @@ -2993,10 +2999,13 @@ def result(self, timeout=_NOT_SET): """ if timeout is not _NOT_SET: - # TODO: warn deprecated - pass + msg = "ResponseFuture.result timeout argument is deprecated. Specify the request timeout via Session.execute[_async]." + warnings.warn(msg, DeprecationWarning) + log.warning(msg) + else: + timeout = None - self._event.wait() + self._event.wait(timeout) if self._final_result is not _NOT_SET: if self._paging_state is None: return self._final_result From 70478e62fd7d98823c1ad2dd9e6dcb96dd4c0601 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 6 May 2015 15:01:14 -0500 Subject: [PATCH 1394/3726] Remove unused 'timeout' in ..cluster.PagedResult --- cassandra/cluster.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 573ba3db64..b8a236a30e 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3010,7 +3010,7 @@ def result(self, timeout=_NOT_SET): if self._paging_state is None: return self._final_result else: - return PagedResult(self, self._final_result, timeout) + return PagedResult(self, self._final_result) else: raise self._final_exception @@ -3162,10 +3162,9 @@ class will be returned. 
response_future = None - def __init__(self, response_future, initial_response, timeout=_NOT_SET): + def __init__(self, response_future, initial_response): self.response_future = response_future self.current_response = iter(initial_response) - self.timeout = timeout def __iter__(self): return self From 7f2e992d768fbcfee57e493ded850a9299f9408c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 6 May 2015 16:16:40 -0500 Subject: [PATCH 1395/3726] Minor tweaks on trace client integration test --- tests/integration/standard/test_query.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index bb1cafa776..7f84f9ab31 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. import os -import socket from cassandra.concurrent import execute_concurrent @@ -99,7 +98,7 @@ def test_client_ip_in_trace(self): @since 3.0 @jira_ticket PYTHON-235 - @expected_result client address should be present in C* > 3, otherwise should be none. + @expected_result client address should be present in C* >= 3, otherwise should be none. @test_category tracing + """ From 087293050b322b736a7468ae70c2044d1510c47a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 6 May 2015 16:29:23 -0500 Subject: [PATCH 1396/3726] Update unit tests for new refresh_schema signature. 
--- tests/unit/test_control_connection.py | 6 +++--- tests/unit/test_response_future.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index 1f1ccc1bf1..9c93f5afe4 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -410,12 +410,12 @@ def test_handle_schema_change(self): } self.cluster.scheduler.reset_mock() self.control_connection._handle_schema_change(event) - self.cluster.scheduler.schedule_unique.assert_called_once_with(0.0, self.control_connection.refresh_schema, 'ks1', 'table1', None) + self.cluster.scheduler.schedule_unique.assert_called_once_with(0.0, self.control_connection.refresh_schema, 'ks1', 'table1', None, None, None) self.cluster.scheduler.reset_mock() event['table'] = None self.control_connection._handle_schema_change(event) - self.cluster.scheduler.schedule_unique.assert_called_once_with(0.0, self.control_connection.refresh_schema, 'ks1', None, None) + self.cluster.scheduler.schedule_unique.assert_called_once_with(0.0, self.control_connection.refresh_schema, 'ks1', None, None, None, None) def test_refresh_disabled(self): cluster = MockCluster() @@ -463,4 +463,4 @@ def test_refresh_disabled(self): cc_no_topo_refresh._handle_schema_change(schema_event) cluster.scheduler.schedule_unique.assert_has_calls([call(ANY, cc_no_topo_refresh.refresh_node_list_and_token_map), call(0.0, cc_no_topo_refresh.refresh_schema, - schema_event['keyspace'], schema_event['table'], None)]) + schema_event['keyspace'], schema_event['table'], None, None, None)]) diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 027fe73214..92351a9d1d 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -105,7 +105,7 @@ def test_schema_change_result(self): kind=RESULT_KIND_SCHEMA_CHANGE, results={'keyspace': "keyspace1", "table": "table1"}) rf._set_result(result) - 
session.submit.assert_called_once_with(ANY, 'keyspace1', 'table1', None, ANY, rf) + session.submit.assert_called_once_with(ANY, 'keyspace1', 'table1', None, None, None, ANY, rf) def test_other_result_message_kind(self): session = self.make_session() From b10d945886a84e73b9542f389d9a46a8e717775c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 7 May 2015 15:59:35 -0500 Subject: [PATCH 1397/3726] Synchronize, and short circuit CC._signal_error during shutdown Addresses an issue where a failed executor-scheduled refresh_schema logs an error because self._connection is removed during shutdown. --- cassandra/cluster.py | 36 +++++++++++++++++++---------------- tests/integration/__init__.py | 2 +- 2 files changed, 21 insertions(+), 17 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c5c83a730f..f1c6cd3615 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2009,14 +2009,14 @@ def shutdown(self): else: self._is_shutdown = True - log.debug("Shutting down control connection") - # stop trying to reconnect (if we are) - if self._reconnection_handler: - self._reconnection_handler.cancel() + log.debug("Shutting down control connection") + # stop trying to reconnect (if we are) + if self._reconnection_handler: + self._reconnection_handler.cancel() - if self._connection: - self._connection.close() - del self._connection + if self._connection: + self._connection.close() + del self._connection def refresh_schema(self, keyspace=None, table=None, usertype=None, schema_agreement_wait=None): @@ -2390,17 +2390,21 @@ def _get_schema_mismatches(self, peers_result, local_result, local_address): return dict((version, list(nodes)) for version, nodes in six.iteritems(versions)) def _signal_error(self): - # try just signaling the cluster, as this will trigger a reconnect - # as part of marking the host down - if self._connection and self._connection.is_defunct: - host = self._cluster.metadata.get_host(self._connection.host) - # host may be None if 
it's already been removed, but that indicates - # that errors have already been reported, so we're fine - if host: - self._cluster.signal_connection_failure( - host, self._connection.last_error, is_host_addition=False) + with self._lock: + if self._is_shutdown: return + # try just signaling the cluster, as this will trigger a reconnect + # as part of marking the host down + if self._connection and self._connection.is_defunct: + host = self._cluster.metadata.get_host(self._connection.host) + # host may be None if it's already been removed, but that indicates + # that errors have already been reported, so we're fine + if host: + self._cluster.signal_connection_failure( + host, self._connection.last_error, is_host_addition=False) + return + # if the connection is not defunct or the host already left, reconnect # manually self.reconnect() diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 4b3c538145..54e785facc 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -228,7 +228,7 @@ def teardown_package(): log.exception('Failed to remove cluster: %s' % cluster_name) except Exception: - log.warn('Did not find cluster: %s' % cluster_name) + log.warning('Did not find cluster: %s' % cluster_name) def setup_test_keyspace(ipformat=None): From 36492777b32aecf358892144b4aca1159a907428 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 7 May 2015 16:32:04 -0500 Subject: [PATCH 1398/3726] load_balancing_policy defaults to TokeanAware(DCAware) PYTHON-160 --- cassandra/cluster.py | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index f1c6cd3615..436bfd929e 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -61,7 +61,7 @@ RESULT_KIND_SET_KEYSPACE, RESULT_KIND_ROWS, RESULT_KIND_SCHEMA_CHANGE) from cassandra.metadata import Metadata, protect_name -from cassandra.policies import (RoundRobinPolicy, 
SimpleConvictionPolicy, +from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy, ExponentialReconnectionPolicy, HostDistance, RetryPolicy) from cassandra.pool import (Host, _ReconnectionHandler, _HostReconnectionHandler, @@ -161,6 +161,16 @@ def _shutdown_cluster(cluster): cluster.shutdown() +# murmur3 implementation required for TokenAware is only available for CPython +import platform +if platform.python_implementation() == 'CPython': + def default_lbp_factory(): + return TokenAwarePolicy(DCAwareRoundRobinPolicy()) +else: + def default_lbp_factory(): + return DCAwareRoundRobinPolicy() + + class Cluster(object): """ The main class to use when interacting with a Cassandra cluster. @@ -185,9 +195,9 @@ class Cluster(object): Defaults to loopback interface. Note: When using :class:`.DCAwareLoadBalancingPolicy` with no explicit - local_dc set, the DC is chosen from an arbitrary host in contact_points. - In this case, contact_points should contain only nodes from a single, - local DC. + local_dc set (as is the default), the DC is chosen from an arbitrary + host in contact_points. In this case, contact_points should contain + only nodes from a single, local DC. """ port = 9042 @@ -281,7 +291,16 @@ def auth_provider(self, value): load_balancing_policy = None """ An instance of :class:`.policies.LoadBalancingPolicy` or - one of its subclasses. Defaults to :class:`~.RoundRobinPolicy`. + one of its subclasses. + + .. versionchanged:: 2.6.0 + + Defaults to :class:`~.TokenAwarePolicy` (:class:`~.DCAwareRoundRobinPolicy`). + when using CPython (where the murmur3 extension is available). :class:`~.DCAwareRoundRobinPolicy` + otherwise. Default local DC will be chosen from contact points. 
+ + **Please see** :class:`~.DCAwareRoundRobinPolicy` **for a discussion on default behavior with respect to + DC locality and remote nodes.** """ reconnection_policy = ExponentialReconnectionPolicy(1.0, 600.0) @@ -311,6 +330,8 @@ def auth_provider(self, value): by the :attr:`~.Cluster.load_balancing_policy` will have a connection opened to them. Otherwise, they will not have a connection opened to them. + Note that the default load balancing policy ignores remote hosts by default. + .. versionadded:: 2.1.0 """ @@ -495,7 +516,7 @@ def __init__(self, self.load_balancing_policy = load_balancing_policy else: - self.load_balancing_policy = RoundRobinPolicy() + self.load_balancing_policy = default_lbp_factory() if reconnection_policy is not None: if isinstance(reconnection_policy, type): From ce15713f0898fdff2f4b5006c55b9e8c00b3458a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 7 May 2015 16:51:07 -0500 Subject: [PATCH 1399/3726] Remove noop from Timer class, use flag. --- cassandra/connection.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 54641a3ac5..435f1e6f75 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -896,29 +896,29 @@ def _raise_if_stopped(self): class Timer(object): - cancelled = False + canceled = False def __init__(self, timeout, callback): self.end = time.time() + timeout self.callback = callback if timeout < 0: - self.on_timeout() + self.callback() def __lt__(self, other): return self.end < other.end def cancel(self): - self.callback = self._noop - self.cancelled = True + self.canceled = True def finish(self, time_now): - if self.callback is self._noop or time_now >= self.end: + if self.canceled: + return True + + if time_now >= self.end: self.callback() return True - return False - def _noop(self): - pass + return False class TimerManager(object): From fab252154efa23adceecef1f0403c69f0d937813 Mon Sep 17 00:00:00 2001 From: Adam 
Holmberg Date: Fri, 8 May 2015 10:04:58 -0500 Subject: [PATCH 1400/3726] Explanation of eventletreactor service_timeouts routine --- cassandra/io/eventletreactor.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index 5db0816c88..aceac55e10 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -16,7 +16,6 @@ # Originally derived from MagnetoDB source: # https://github.com/stackforge/magnetodb/blob/2015.1.0b1/magnetodb/common/cassandra/io/eventletreactor.py -from collections import defaultdict from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL import eventlet from eventlet.green import select, socket @@ -73,6 +72,12 @@ def create_timer(cls, timeout, callback): @classmethod def service_timeouts(cls): + """ + cls._timeout_watcher runs in this loop forever. + It is usually waiting for the next timeout on the cls._new_timer Event. + When new timers are added, that event is set so that the watcher can + wake up and possibly set an earlier timeout. 
+ """ timer_manager = cls._timers while True: next_end = timer_manager.service_timeouts() From 2d3e33068071142c59bdaed8c1c6b18152d43d62 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 8 May 2015 11:01:51 -0500 Subject: [PATCH 1401/3726] Explain TwistedLoop add_timer thread interaction --- cassandra/io/twistedreactor.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index 9b835fe137..0f5c841c75 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -137,6 +137,8 @@ def _cleanup(self): def add_timer(self, timer): self._timers.add_timer(timer) + # callFromThread to schedule from the loop thread, where + # the timeout task can safely be modified reactor.callFromThread(self._schedule_timeout, timer.end) def _schedule_timeout(self, next_timeout): From 7e70d5543d08035ff7a4a94a7e9564f5ffc7281c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 11 May 2015 09:53:18 -0500 Subject: [PATCH 1402/3726] Coarse locking on TimerManager.service_timeouts --- cassandra/connection.py | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 435f1e6f75..707a2a4db0 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -16,11 +16,12 @@ from collections import defaultdict, deque import errno from functools import wraps, partial +from heapq import heappush, heappop import io import logging import os import sys -from threading import Thread, Event, RLock +from threading import Thread, Event, RLock, Lock import time if 'gevent.monkey' in sys.modules: @@ -29,7 +30,6 @@ from six.moves.queue import Queue, Empty # noqa import six -from six.moves import queue from six.moves import range from cassandra import ConsistencyLevel, AuthenticationFailed, OperationTimedOut @@ -924,10 +924,12 @@ def finish(self, time_now): class TimerManager(object): def __init__(self): - 
self._timers = queue.PriorityQueue() + self._queue = [] + self._lock = Lock() def add_timer(self, timer): - self._timers.put_nowait(timer) + with self._lock: + heappush(self._queue, timer) def service_timeouts(self): """ @@ -935,26 +937,29 @@ def service_timeouts(self): :return: next end time, or None """ now = time.time() - while not self._timers.empty(): - timer = self._timers.get_nowait() - try: - if not timer.finish(now): - self._timers.put_nowait(timer) - return timer.end - except Exception: - log.exception("Exception while servicing timeout callback: ") + queue = self._queue + with self._lock: + while queue: + try: + timer = queue[0] + if timer.finish(now): + heappop(queue) + else: + return timer.end + except Exception: + log.exception("Exception while servicing timeout callback: ") @property def next_timeout(self): try: - return self._timers.queue[0].end + return self._queue[0].end except IndexError: pass @property def next_offset(self): try: - next_end = self._timers.queue[0].end + next_end = self._queue[0].end return next_end - time.time() except IndexError: pass From 03068041882c054c27435cd02405ab2072ceb158 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 11 May 2015 11:41:47 -0500 Subject: [PATCH 1403/3726] Remove lock in TimerManager --- cassandra/connection.py | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 707a2a4db0..f3b47c079f 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -925,29 +925,35 @@ class TimerManager(object): def __init__(self): self._queue = [] - self._lock = Lock() + self._new_timers = [] def add_timer(self, timer): - with self._lock: - heappush(self._queue, timer) + """ + called from client thread with a Timer object + """ + self._new_timers.append(timer) def service_timeouts(self): """ run callbacks on all expired timers + Called from the event thread :return: next end time, or None """ - now = 
time.time() queue = self._queue - with self._lock: - while queue: - try: - timer = queue[0] - if timer.finish(now): - heappop(queue) - else: - return timer.end - except Exception: - log.exception("Exception while servicing timeout callback: ") + new_timers = self._new_timers + while self._new_timers: + t = new_timers.pop() + heappush(queue, t) + now = time.time() + while queue: + try: + timer = queue[0] + if timer.finish(now): + heappop(queue) + else: + return timer.end + except Exception: + log.exception("Exception while servicing timeout callback: ") @property def next_timeout(self): From dd737915a9710b779157c7484a2d9dcc9db3ed32 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 11 May 2015 11:53:33 -0500 Subject: [PATCH 1404/3726] Timer queue is now tuple instead of using __lt__ on objects --- cassandra/connection.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index f3b47c079f..0d0c024b0d 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -904,9 +904,6 @@ def __init__(self, timeout, callback): if timeout < 0: self.callback() - def __lt__(self, other): - return self.end < other.end - def cancel(self): self.canceled = True @@ -931,7 +928,7 @@ def add_timer(self, timer): """ called from client thread with a Timer object """ - self._new_timers.append(timer) + self._new_timers.append((timer.end, timer)) def service_timeouts(self): """ @@ -942,12 +939,11 @@ def service_timeouts(self): queue = self._queue new_timers = self._new_timers while self._new_timers: - t = new_timers.pop() - heappush(queue, t) + heappush(queue, new_timers.pop()) now = time.time() while queue: try: - timer = queue[0] + timer = queue[0][1] if timer.finish(now): heappop(queue) else: @@ -958,14 +954,14 @@ def service_timeouts(self): @property def next_timeout(self): try: - return self._queue[0].end + return self._queue[0][0] except IndexError: pass @property def next_offset(self): try: - 
next_end = self._queue[0].end + next_end = self._queue[0][0] return next_end - time.time() except IndexError: pass From 8f08a885d8c1e4500f914b88c53279238d4251da Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 11 May 2015 12:47:17 -0500 Subject: [PATCH 1405/3726] populate ResponseFuture OpterationTimedOut with errors, host --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index b8a236a30e..515f18cbf1 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2609,7 +2609,7 @@ def _cancel_timer(self): self._timer.cancel() def _on_timeout(self): - self._set_final_exception(OperationTimedOut()) + self._set_final_exception(OperationTimedOut(self._errors, self._current_host)) def _make_query_plan(self): # convert the list/generator/etc to an iterator so that subsequent From 7ab74a7f1a7cc03bb3a8cada6fb257d0750bf36b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 11 May 2015 13:06:27 -0500 Subject: [PATCH 1406/3726] Remove outdated note about no errback on timeout --- cassandra/cluster.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 515f18cbf1..f5dd46ab0f 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2980,11 +2980,6 @@ def result(self, timeout=_NOT_SET): This is a client-side timeout. For more information about server-side coordinator timeouts, see :class:`.policies.RetryPolicy`. - **Important**: This timeout currently has no effect on callbacks registered - on a :class:`~.ResponseFuture` through :meth:`.ResponseFuture.add_callback` or - :meth:`.ResponseFuture.add_errback`; even if a query exceeds this default - timeout, neither the registered callback or errback will be called. 
- Example usage:: >>> future = session.execute_async("SELECT * FROM mycf") @@ -3006,6 +3001,9 @@ def result(self, timeout=_NOT_SET): timeout = None self._event.wait(timeout) + # TODO: remove this conditional when deprecated timeout parameter is removed + if not self._event.is_set(): + self._on_timeout() if self._final_result is not _NOT_SET: if self._paging_state is None: return self._final_result From 5a235c46ffad7119e2dd6fff46861ef26d80d0f2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 11 May 2015 14:41:18 -0500 Subject: [PATCH 1407/3726] Make unit test for large DateType work on 32-bit platforms --- tests/unit/test_types.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 6bfe75b459..bc0eac2da0 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -336,7 +336,7 @@ def test_datetype(self): # beyond 32b expected = 2 ** 33 - self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime.utcfromtimestamp(expected)) + self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime(2242, 3, 16, 12, 56, 32)) # less than epoc (PYTHON-119) expected = -770172256 From f4434944b2d4b8045bbb7892195d32ce7d3c2966 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 11 May 2015 14:57:02 -0500 Subject: [PATCH 1408/3726] Convert errors on re-prepare to API exceptions --- cassandra/cluster.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c5c83a730f..416583aed2 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2875,7 +2875,10 @@ def _execute_after_prepare(self, response): "Got unexpected response when preparing statement " "on host %s: %s" % (self._current_host, response))) elif isinstance(response, ErrorMessage): - self._set_final_exception(response) + if hasattr(response, 'to_exception'): + 
self._set_final_exception(response.to_exception()) + else: + self._set_final_exception(response) elif isinstance(response, ConnectionException): log.debug("Connection error when preparing statement on host %s: %s", self._current_host, response) From 2464085bccd62fefe1e8e9734be9bb1363da4f00 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 12 May 2015 08:08:17 -0500 Subject: [PATCH 1409/3726] Pass host through to Auth instance in SaslAuthProvider.new_authenticator PYTHON-300 --- cassandra/auth.py | 7 ++++--- .../integration/standard/test_authentication.py | 16 ++++++++++++++-- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/cassandra/auth.py b/cassandra/auth.py index 67d302a9e8..0cb4c6b276 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -139,8 +139,7 @@ class SaslAuthProvider(AuthProvider): from cassandra.cluster import Cluster from cassandra.auth import SaslAuthProvider - sasl_kwargs = {'host': 'localhost', - 'service': 'dse', + sasl_kwargs = {'service': 'dse', 'mechanism': 'GSSAPI', 'qops': 'auth'.split(',')} auth_provider = SaslAuthProvider(**sasl_kwargs) @@ -152,10 +151,12 @@ class SaslAuthProvider(AuthProvider): def __init__(self, **sasl_kwargs): if SASLClient is None: raise ImportError('The puresasl library has not been installed') + if 'host' in sasl_kwargs: + raise ValueError("kwargs should not contain 'host' since it is passed dynamically to new_authenticator") self.sasl_kwargs = sasl_kwargs def new_authenticator(self, host): - return SaslAuthenticator(**self.sasl_kwargs) + return SaslAuthenticator(host, **self.sasl_kwargs) class SaslAuthenticator(Authenticator): """ diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 3820ef7a90..80c99ede32 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -138,10 +138,22 @@ def setUp(self): raise unittest.SkipTest('pure-sasl is not installed') def 
get_authentication_provider(self, username, password): - sasl_kwargs = {'host': 'localhost', - 'service': 'cassandra', + sasl_kwargs = {'service': 'cassandra', 'mechanism': 'PLAIN', 'qops': ['auth'], 'username': username, 'password': password} return SaslAuthProvider(**sasl_kwargs) + + # these could equally be unit tests + def test_host_passthrough(self): + sasl_kwargs = {'service': 'cassandra', + 'mechanism': 'PLAIN'} + provider = SaslAuthProvider(**sasl_kwargs) + host = 'thehostname' + authenticator = provider.new_authenticator(host) + self.assertEqual(authenticator.sasl.host, host) + + def test_host_rejected(self): + sasl_kwargs = {'host': 'something'} + self.assertRaises(ValueError, SaslAuthProvider, **sasl_kwargs) From 3825c3a9120040050c20e7e80b905ebc65bb03d6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 12 May 2015 09:06:22 -0500 Subject: [PATCH 1410/3726] length validation for custom payload map --- cassandra/protocol.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 1a5531022c..b0f9e6e296 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -103,6 +103,8 @@ def update_custom_payload(self, other): if not self.custom_payload: self.custom_payload = {} self.custom_payload.update(other) + if len(self.custom_payload) > 65535: + raise ValueError("Custom payload map exceeds max count allowed by protocol (65535)") def __repr__(self): return '<%s(%s)>' % (self.__class__.__name__, ', '.join('%s=%r' % i for i in _get_params(self))) From 58536f57467db9998417eff57304ef40562f5975 Mon Sep 17 00:00:00 2001 From: Carl Yeksigian Date: Mon, 27 Apr 2015 17:28:47 -0400 Subject: [PATCH 1411/3726] Add trace_id to statement --- cassandra/cluster.py | 2 ++ cassandra/query.py | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c5c83a730f..8f41948681 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2700,6 +2700,8 @@ def 
_set_result(self, response): trace_id = getattr(response, 'trace_id', None) if trace_id: + if self.query: + self.query.trace_id = trace_id self._query_trace = QueryTrace(trace_id, self.session) if isinstance(response, ResultMessage): diff --git a/cassandra/query.py b/cassandra/query.py index 7f7756b3fb..1232514bfa 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -167,6 +167,12 @@ class Statement(object): this will be set to a :class:`.QueryTrace` instance. """ + trace_id = None + """ + If :meth:`.Session.execute()` is run with `trace` set to :const:`True`, + this will be set to the tracing ID from the server. + """ + consistency_level = None """ The :class:`.ConsistencyLevel` to be used for this operation. Defaults From bc3ea9b1bc6ff03811332b7923a3eec3f8abde7d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 12 May 2015 11:19:51 -0500 Subject: [PATCH 1412/3726] Test for trace_id on query PYTHON-302 --- tests/integration/standard/test_query.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index b30c387b01..6b4eb25e97 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -73,6 +73,21 @@ def test_trace_prints_okay(self): cluster.shutdown() + def test_trace_id_to_query(self): + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + session = cluster.connect() + + query = "SELECT * FROM system.local" + statement = SimpleStatement(query) + self.assertIsNone(statement.trace_id) + future = session.execute_async(statement, trace=True) + + # query should have trace_id, even before trace is obtained + future.result() + self.assertIsNotNone(statement.trace_id) + + cluster.shutdown() + def test_trace_ignores_row_factory(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() From 90e2ace36f56080a77a940fe59a662c89334599b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 
12 May 2015 15:04:43 -0500 Subject: [PATCH 1413/3726] cqle: Protect ks/cf identifiers that use keywords PYTHON-244 --- cassandra/cqlengine/management.py | 15 +++---- cassandra/cqlengine/models.py | 43 +++++++++++-------- .../integration/cqlengine/model/test_model.py | 37 +++++++++++++++- 3 files changed, 69 insertions(+), 26 deletions(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 7ea642af8d..d84406f959 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -81,7 +81,7 @@ def create_keyspace(name, strategy_class, replication_factor, durable_writes=Tru query = """ CREATE KEYSPACE {} WITH REPLICATION = {} - """.format(name, json.dumps(replication_map).replace('"', "'")) + """.format(metadata.protect_name(name), json.dumps(replication_map).replace('"', "'")) if strategy_class != 'SimpleStrategy': query += " AND DURABLE_WRITES = {}".format('true' if durable_writes else 'false') @@ -163,7 +163,7 @@ def drop_keyspace(name): cluster = get_cluster() if name in cluster.metadata.keyspaces: - execute("DROP KEYSPACE {}".format(name)) + execute("DROP KEYSPACE {}".format(metadata.protect_name(name))) def sync_table(model): @@ -191,9 +191,8 @@ def sync_table(model): if model.__abstract__: raise CQLEngineException("cannot create table from abstract model") - # construct query string cf_name = model.column_family_name() - raw_cf_name = model.column_family_name(include_keyspace=False) + raw_cf_name = model._raw_column_family_name() ks_name = model._get_keyspace() @@ -433,7 +432,7 @@ def setter(key, limited_to_strategy=None): def get_fields(model): # returns all fields that aren't part of the PK ks_name = model._get_keyspace() - col_family = model.column_family_name(include_keyspace=False) + col_family = model._raw_column_family_name() field_types = ['regular', 'static'] query = "select * from system.schema_columns where keyspace_name = %s and columnfamily_name = %s" tmp = execute(query, [ks_name, 
col_family]) @@ -452,7 +451,7 @@ def get_table_settings(model): # returns the table as provided by the native driver for a given model cluster = get_cluster() ks = model._get_keyspace() - table = model.column_family_name(include_keyspace=False) + table = model._raw_column_family_name() table = cluster.metadata.keyspaces[ks].tables[table] return table @@ -520,11 +519,11 @@ def drop_table(model): meta = get_cluster().metadata ks_name = model._get_keyspace() - raw_cf_name = model.column_family_name(include_keyspace=False) + raw_cf_name = model._raw_column_family_name() try: meta.keyspaces[ks_name].tables[raw_cf_name] - execute('drop table {};'.format(model.column_family_name(include_keyspace=True))) + execute('drop table {};'.format(model.column_family_name())) except KeyError: pass diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 0baa0906a0..4f6d120250 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -23,6 +23,7 @@ from cassandra.cqlengine import query from cassandra.cqlengine.query import DoesNotExist as _DoesNotExist from cassandra.cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned +from cassandra.metadata import protect_name from cassandra.util import OrderedDict log = logging.getLogger(__name__) @@ -353,6 +354,8 @@ class MultipleObjectsReturned(_MultipleObjectsReturned): _if_not_exists = False # optional if_not_exists flag to check existence before insertion + _table_name = None # used internally to cache a derived table name + def __init__(self, **values): self._values = {} self._ttl = self.__default_ttl__ @@ -504,27 +507,33 @@ def column_family_name(cls, include_keyspace=True): Returns the column family name if it's been defined otherwise, it creates it from the module and class name """ - cf_name = '' - if cls.__table_name__: - cf_name = cls.__table_name__.lower() - else: - # get polymorphic base table names if model is polymorphic - if cls._is_polymorphic and not 
cls._is_polymorphic_base: - return cls._polymorphic_base.column_family_name(include_keyspace=include_keyspace) + cf_name = protect_name(cls._raw_column_family_name()) + if include_keyspace: + return '{}.{}'.format(protect_name(cls._get_keyspace()), cf_name) + + return cf_name - camelcase = re.compile(r'([a-z])([A-Z])') - ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s) - cf_name += ccase(cls.__name__) - # trim to less than 48 characters or cassandra will complain - cf_name = cf_name[-48:] - cf_name = cf_name.lower() - cf_name = re.sub(r'^_+', '', cf_name) + @classmethod + def _raw_column_family_name(cls): + if not cls._table_name: + if cls.__table_name__: + cls._table_name = cls.__table_name__.lower() + else: + if cls._is_polymorphic and not cls._is_polymorphic_base: + cls._table_name = cls._polymorphic_base._raw_column_family_name() + else: + camelcase = re.compile(r'([a-z])([A-Z])') + ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s) - if not include_keyspace: - return cf_name + cf_name = ccase(cls.__name__) + # trim to less than 48 characters or cassandra will complain + cf_name = cf_name[-48:] + cf_name = cf_name.lower() + cf_name = re.sub(r'^_+', '', cf_name) + cls._table_name = cf_name - return '{}.{}'.format(cls._get_keyspace(), cf_name) + return cls._table_name def validate(self): """ diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index bdb045954a..c37e088f54 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -14,8 +14,9 @@ from unittest import TestCase -from cassandra.cqlengine.models import Model, ModelDefinitionException from cassandra.cqlengine import columns +from cassandra.cqlengine.management import sync_table, drop_table, create_keyspace_simple, drop_keyspace +from cassandra.cqlengine.models import Model, ModelDefinitionException class 
TestModel(TestCase): @@ -49,6 +50,40 @@ class EqualityModel1(Model): self.assertEqual(m0, m0) self.assertNotEqual(m0, m1) + def test_keywords_as_names(self): + create_keyspace_simple('keyspace', 1) + + class table(Model): + __keyspace__ = 'keyspace' + select = columns.Integer(primary_key=True) + table = columns.Text() + + # create should work + drop_table(table) + sync_table(table) + + created = table.create(select=0, table='table') + selected = table.objects(select=0)[0] + self.assertEqual(created.select, selected.select) + self.assertEqual(created.table, selected.table) + + # alter should work + class table(Model): + __keyspace__ = 'keyspace' + select = columns.Integer(primary_key=True) + table = columns.Text() + where = columns.Text() + + sync_table(table) + + created = table.create(select=1, table='table') + selected = table.objects(select=1)[0] + self.assertEqual(created.select, selected.select) + self.assertEqual(created.table, selected.table) + self.assertEqual(created.where, selected.where) + + drop_keyspace('keyspace') + class BuiltInAttributeConflictTest(TestCase): """tests Model definitions that conflict with built-in attributes/methods""" From 5c19ad507ceba7731494ddbe8fff06d146e8c789 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 7 May 2015 16:42:42 -0500 Subject: [PATCH 1414/3726] Fixing timeout issues --- tests/integration/long/test_large_data.py | 86 +++++++++++++++++++---- 1 file changed, 74 insertions(+), 12 deletions(-) diff --git a/tests/integration/long/test_large_data.py b/tests/integration/long/test_large_data.py index acb203568b..0b77feb0cb 100644 --- a/tests/integration/long/test_large_data.py +++ b/tests/integration/long/test_large_data.py @@ -72,6 +72,7 @@ def make_session_and_keyspace(self): def batch_futures(self, session, statement_generator): concurrency = 10 futures = Queue(maxsize=concurrency) + number_of_timeouts = 0 for i, statement in enumerate(statement_generator): if i > 0 and i % (concurrency - 1) == 0: # clear the 
existing queue @@ -80,6 +81,7 @@ def batch_futures(self, session, statement_generator): futures.get_nowait().result() except (OperationTimedOut, WriteTimeout): ex_type, ex, tb = sys.exc_info() + number_of_timeouts += 1 log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb time.sleep(1) @@ -94,11 +96,13 @@ def batch_futures(self, session, statement_generator): futures.get_nowait().result() except (OperationTimedOut, WriteTimeout): ex_type, ex, tb = sys.exc_info() + number_of_timeouts += 1 log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb time.sleep(1) except Empty: break + return number_of_timeouts def test_wide_rows(self): table = 'wide_rows' @@ -120,48 +124,103 @@ def test_wide_rows(self): session.cluster.shutdown() def test_wide_batch_rows(self): + """ + Test for inserting wide rows with batching + + test_wide_batch_rows tests inserting a wide row of data using batching. It will then attempt to query + that data and ensure that all of it has been inserted appropriately. + + @expected_result all items should be inserted, and verified. 
+ + @test_category queries:batch + """ + + # Table Creation table = 'wide_batch_rows' session = self.make_session_and_keyspace() session.execute('CREATE TABLE %s (k INT, i INT, PRIMARY KEY(k, i))' % table) - # Write + # Run batch insert statement = 'BEGIN BATCH ' - for i in range(2000): + to_insert = 2000 + for i in range(to_insert): statement += 'INSERT INTO %s (k, i) VALUES (%s, %s) ' % (table, 0, i) statement += 'APPLY BATCH' statement = SimpleStatement(statement, consistency_level=ConsistencyLevel.QUORUM) - session.execute(statement) - # Read - results = session.execute('SELECT i FROM %s WHERE k=%s' % (table, 0)) + # Execute insert with larger timeout, since it's a wide row + try: + session.execute(statement,timeout=30.0) + + except OperationTimedOut: + #If we timeout on insertion that's bad but it could be just slow underlying c* + #Attempt to validate anyway, we will fail if we don't get the right data back. + ex_type, ex, tb = sys.exc_info() + log.warn("Batch wide row insertion timed out, this may require additional investigation") + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb # Verify - for i, row in enumerate(results): - self.assertEqual(row['i'], i) + results = session.execute('SELECT i FROM %s WHERE k=%s' % (table, 0)) + lastvalue = 0 + for j, row in enumerate(results): + lastValue=row['i'] + self.assertEqual(lastValue, j) + + #check the last value make sure it's what we expect + index_value = to_insert-1 + self.assertEqual(lastValue,index_value,"Verification failed only found {0} inserted we were expecting {1}".format(j,index_value)) session.cluster.shutdown() def test_wide_byte_rows(self): + """ + Test for inserting wide row of bytes + + test_wide_batch_rows tests inserting a wide row of data bytes. It will then attempt to query + that data and ensure that all of it has been inserted appropriately. + + @expected_result all items should be inserted, and verified. 
+ + @test_category queries + """ + + # Table creation table = 'wide_byte_rows' session = self.make_session_and_keyspace() session.execute('CREATE TABLE %s (k INT, i INT, v BLOB, PRIMARY KEY(k, i))' % table) + # Prepare statement and run insertions + to_insert = 100000 prepared = session.prepare('INSERT INTO %s (k, i, v) VALUES (0, ?, 0xCAFE)' % (table, )) - - # Write - self.batch_futures(session, (prepared.bind((i, )) for i in range(100000))) + timeouts = self.batch_futures(session, (prepared.bind((i, )) for i in range(to_insert))) # Read results = session.execute('SELECT i, v FROM %s WHERE k=0' % (table, )) + # number of expected results + expected_results = to_insert-timeouts-1 + # Verify bb = pack('>H', 0xCAFE) - for row in results: + for i, row in enumerate(results): self.assertEqual(row['v'], bb) + self.assertGreaterEqual(i, expected_results, "Verification failed only found {0} inserted we were expecting {1}".format(i,expected_results)) + session.cluster.shutdown() def test_large_text(self): + """ + Test for inserting a large text field + + test_large_text tests inserting a large text field into a row. + + @expected_result the large text value should be inserted. 
When the row is queried it should match the original + value that was inserted + + @test_category queries + """ table = 'large_text' session = self.make_session_and_keyspace() session.execute('CREATE TABLE %s (k int PRIMARY KEY, txt text)' % table) @@ -178,8 +237,11 @@ def test_large_text(self): result = session.execute('SELECT * FROM %s WHERE k=%s' % (table, 0)) # Verify - for row in result: + found_result = False + for i, row in enumerate(result): self.assertEqual(row['txt'], text) + found_result = True + self.assertTrue(found_result, "No results were found") session.cluster.shutdown() From c841c412995788c8ea6e5c96365839e6a28acf01 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 13 May 2015 14:59:10 -0500 Subject: [PATCH 1415/3726] cqle: Make sure user type is registered on sync even if no change is made. --- cassandra/cqlengine/management.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 7ea642af8d..284c1404fb 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -313,6 +313,8 @@ def _sync_type(ks_name, type_model, omit_subtypes=None): if field.db_field_name not in defined_fields: execute("ALTER TYPE {} ADD {}".format(type_name_qualified, field.get_column_def())) + type_model.register_for_keyspace(ks_name) + if len(defined_fields) == len(model_fields): log.info("Type %s did not require synchronization", type_name_qualified) return @@ -321,8 +323,6 @@ def _sync_type(ks_name, type_model, omit_subtypes=None): if db_fields_not_in_model: log.info("Type %s has fields not referenced by model: %s", type_name_qualified, db_fields_not_in_model) - type_model.register_for_keyspace(ks_name) - def get_create_type(type_model, keyspace): type_meta = metadata.UserType(keyspace, From d9cbb6e2689a31ffcb6cd5d243d2945c6233b697 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 13 May 2015 14:59:53 -0500 Subject: [PATCH 1416/3726] cqle: 
Correct CQL encoding for collections with nested types. PYTHON-311 Removes superfluous quoting mechanism, allowing collections to correctly encode contents (instead of being implicitly strinified). --- cassandra/cqlengine/columns.py | 68 +------------------ cassandra/cqlengine/statements.py | 2 +- .../columns/test_container_columns.py | 12 ++-- .../cqlengine/columns/test_value_io.py | 9 --- .../statements/test_update_statement.py | 10 +-- 5 files changed, 15 insertions(+), 86 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index ae17779590..8a09ba1254 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -73,25 +73,6 @@ def get_property(self): return property(_get, _set) -class ValueQuoter(object): - """ - contains a single value, which will quote itself for CQL insertion statements - """ - def __init__(self, value): - self.value = value - - def __str__(self): - raise NotImplementedError - - def __repr__(self): - return self.__str__() - - def __eq__(self, other): - if isinstance(other, self.__class__): - return self.value == other.value - return False - - class Column(object): # the cassandra type this column maps to @@ -701,24 +682,12 @@ def sub_columns(self): return [self.value_col] -class BaseContainerQuoter(ValueQuoter): - - def __nonzero__(self): - return bool(self.value) - - class Set(BaseContainerColumn): """ Stores a set of unordered, unique values http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_set_t.html """ - class Quoter(BaseContainerQuoter): - - def __str__(self): - cq = cql_quote - return '{' + ', '.join([cq(v) for v in self.value]) + '}' - def __init__(self, value_type, strict=True, default=set, **kwargs): """ :param value_type: a column class indicating the types of the value @@ -753,10 +722,7 @@ def to_python(self, value): def to_database(self, value): if value is None: return None - - if isinstance(value, self.Quoter): - return value - return 
self.Quoter({self.value_col.to_database(v) for v in value}) + return {self.value_col.to_database(v) for v in value} class List(BaseContainerColumn): @@ -765,15 +731,6 @@ class List(BaseContainerColumn): http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_list_t.html """ - class Quoter(BaseContainerQuoter): - - def __str__(self): - cq = cql_quote - return '[' + ', '.join([cq(v) for v in self.value]) + ']' - - def __nonzero__(self): - return bool(self.value) - def __init__(self, value_type, default=list, **kwargs): """ :param value_type: a column class indicating the types of the value @@ -799,9 +756,7 @@ def to_python(self, value): def to_database(self, value): if value is None: return None - if isinstance(value, self.Quoter): - return value - return self.Quoter([self.value_col.to_database(v) for v in value]) + return [self.value_col.to_database(v) for v in value] class Map(BaseContainerColumn): @@ -810,21 +765,6 @@ class Map(BaseContainerColumn): http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_map_t.html """ - class Quoter(BaseContainerQuoter): - - def __str__(self): - cq = cql_quote - return '{' + ', '.join([cq(k) + ':' + cq(v) for k, v in self.value.items()]) + '}' - - def get(self, key): - return self.value.get(key) - - def keys(self): - return self.value.keys() - - def items(self): - return self.value.items() - def __init__(self, key_type, value_type, default=dict, **kwargs): """ :param key_type: a column class indicating the types of the key @@ -866,9 +806,7 @@ def to_python(self, value): def to_database(self, value): if value is None: return None - if isinstance(value, self.Quoter): - return value - return self.Quoter({self.key_col.to_database(k): self.value_col.to_database(v) for k, v in value.items()}) + return {self.key_col.to_database(k): self.value_col.to_database(v) for k, v in value.items()} @property def sub_columns(self): diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 
f64cc7a0de..866f6f2958 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -658,7 +658,7 @@ def get_context(self): class InsertStatement(AssignmentStatement): - """ an cql insert select statement """ + """ an cql insert statement """ def __init__(self, table, diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index 03a94e0df4..a9c8b35965 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -161,8 +161,8 @@ def test_to_python(self): column = columns.Set(JsonTestColumn) val = {1, 2, 3} db_val = column.to_database(val) - assert db_val.value == {json.dumps(v) for v in val} - py_val = column.to_python(db_val.value) + assert db_val == {json.dumps(v) for v in val} + py_val = column.to_python(db_val) assert py_val == val def test_default_empty_container_saving(self): @@ -277,8 +277,8 @@ def test_to_python(self): column = columns.List(JsonTestColumn) val = [1, 2, 3] db_val = column.to_database(val) - assert db_val.value == [json.dumps(v) for v in val] - py_val = column.to_python(db_val.value) + assert db_val == [json.dumps(v) for v in val] + py_val = column.to_python(db_val) assert py_val == val def test_default_empty_container_saving(self): @@ -495,8 +495,8 @@ def test_to_python(self): column = columns.Map(JsonTestColumn, JsonTestColumn) val = {1: 2, 3: 4, 5: 6} db_val = column.to_database(val) - assert db_val.value == {json.dumps(k):json.dumps(v) for k,v in val.items()} - py_val = column.to_python(db_val.value) + assert db_val == {json.dumps(k):json.dumps(v) for k,v in val.items()} + py_val = column.to_python(db_val) assert py_val == val def test_default_empty_container_saving(self): diff --git a/tests/integration/cqlengine/columns/test_value_io.py b/tests/integration/cqlengine/columns/test_value_io.py index 04be247a84..17f96e2242 100644 --- 
a/tests/integration/cqlengine/columns/test_value_io.py +++ b/tests/integration/cqlengine/columns/test_value_io.py @@ -22,7 +22,6 @@ from cassandra.cqlengine.management import sync_table from cassandra.cqlengine.management import drop_table from cassandra.cqlengine.models import Model -from cassandra.cqlengine.columns import ValueQuoter from cassandra.cqlengine import columns import unittest @@ -211,11 +210,3 @@ class TestDecimalIO(BaseColumnIOTest): def comparator_converter(self, val): return Decimal(val) - -class TestQuoter(unittest.TestCase): - - def test_equals(self): - assert ValueQuoter(False) == ValueQuoter(False) - assert ValueQuoter(1) == ValueQuoter(1) - assert ValueQuoter("foo") == ValueQuoter("foo") - assert ValueQuoter(1.55) == ValueQuoter(1.55) diff --git a/tests/integration/cqlengine/statements/test_update_statement.py b/tests/integration/cqlengine/statements/test_update_statement.py index 070710146b..0f77d53ca4 100644 --- a/tests/integration/cqlengine/statements/test_update_statement.py +++ b/tests/integration/cqlengine/statements/test_update_statement.py @@ -60,25 +60,25 @@ def test_additional_rendering(self): def test_update_set_add(self): us = UpdateStatement('table') - us.add_assignment_clause(SetUpdateClause('a', Set.Quoter({1}), operation='add')) + us.add_assignment_clause(SetUpdateClause('a', {1}, operation='add')) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') def test_update_empty_set_add_does_not_assign(self): us = UpdateStatement('table') - us.add_assignment_clause(SetUpdateClause('a', Set.Quoter(set()), operation='add')) + us.add_assignment_clause(SetUpdateClause('a', set(), operation='add')) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') def test_update_empty_set_removal_does_not_assign(self): us = UpdateStatement('table') - us.add_assignment_clause(SetUpdateClause('a', Set.Quoter(set()), operation='remove')) + us.add_assignment_clause(SetUpdateClause('a', set(), operation='remove')) 
self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" - %(0)s') def test_update_list_prepend_with_empty_list(self): us = UpdateStatement('table') - us.add_assignment_clause(ListUpdateClause('a', List.Quoter([]), operation='prepend')) + us.add_assignment_clause(ListUpdateClause('a', [], operation='prepend')) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s + "a"') def test_update_list_append_with_empty_list(self): us = UpdateStatement('table') - us.add_assignment_clause(ListUpdateClause('a', List.Quoter([]), operation='append')) + us.add_assignment_clause(ListUpdateClause('a', [], operation='append')) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') From 28359d5ec873ac24388d99f031aa5734dfdbad4d Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 13 May 2015 15:49:35 -0500 Subject: [PATCH 1417/3726] potential fix to travis unit test errors --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 6c4aa31a27..d694e7ab03 100644 --- a/tox.ini +++ b/tox.ini @@ -10,8 +10,8 @@ deps = nose [testenv] deps = {[base]deps} + sure==1.2.3 blist - sure setenv = USE_CASS_EXTERNAL=1 commands = {envpython} setup.py build_ext --inplace nosetests --verbosity=2 tests/unit/ From 45a09d36f979f031e8132572f6bdaa929d3124f7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 13 May 2015 15:56:47 -0500 Subject: [PATCH 1418/3726] Synchronize reconnection_handler cancelation in CC shutdown Fixes a possible race where ControlConnection._reconnection_handler could be removed during shutdown. 
--- cassandra/cluster.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 436bfd929e..87628c1008 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2024,6 +2024,11 @@ def _submit(self, *args, **kwargs): return None def shutdown(self): + # stop trying to reconnect (if we are) + with self._reconnection_lock: + if self._reconnection_handler: + self._reconnection_handler.cancel() + with self._lock: if self._is_shutdown: return @@ -2031,10 +2036,6 @@ def shutdown(self): self._is_shutdown = True log.debug("Shutting down control connection") - # stop trying to reconnect (if we are) - if self._reconnection_handler: - self._reconnection_handler.cancel() - if self._connection: self._connection.close() del self._connection From e9b948b87577d3882add31d6a82b9ce468670617 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Wed, 13 May 2015 16:34:33 -0700 Subject: [PATCH 1419/3726] fixed typo in test harness detecting C* versions --- tests/integration/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 4b3c538145..d47d1d2002 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -119,9 +119,9 @@ def _tuple_version(version_string): log.info('Using Cassandra version: %s', CASSANDRA_VERSION) CCM_KWARGS['version'] = CASSANDRA_VERSION -if CASSANDRA_VERSION > '2.1': +if CASSANDRA_VERSION >= '2.1': default_protocol_version = 3 -elif CASSANDRA_VERSION > '2.0': +elif CASSANDRA_VERSION >= '2.0': default_protocol_version = 2 else: default_protocol_version = 1 From cef495e8563a36152cf23e3b71646d2b7f15b121 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Wed, 13 May 2015 16:53:44 -0700 Subject: [PATCH 1420/3726] fixed typo in test harness detecting C* 3.0 version --- tests/integration/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/tests/integration/__init__.py b/tests/integration/__init__.py index 3ea397f01e..8933eefd48 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -118,11 +118,11 @@ def _tuple_version(version_string): log.info('Using Cassandra version: %s', CASSANDRA_VERSION) CCM_KWARGS['version'] = CASSANDRA_VERSION -if CASSANDRA_VERSION > '3.0': +if CASSANDRA_VERSION >= '3.0': default_protocol_version = 4 -elif CASSANDRA_VERSION > '2.1': +elif CASSANDRA_VERSION >= '2.1': default_protocol_version = 3 -elif CASSANDRA_VERSION > '2.0': +elif CASSANDRA_VERSION >= '2.0': default_protocol_version = 2 else: default_protocol_version = 1 From 536300ddb9973a1378a2f7b9c6b304dfe5f1c2f9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 13 May 2015 20:02:48 -0500 Subject: [PATCH 1421/3726] For DefaultConnection, default to gevent only if actually patched. PYTHON-244 Fixes an issue where gevent is chosen based on module loaded, not monkey patching. In some debuggers, (PyCharm, for example), this would cause connecting to timeout because gevent reactor is used without monkey patching. --- cassandra/cluster.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 56e34e97dd..eca637d0e3 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -79,10 +79,16 @@ def _is_eventlet_monkey_patched(): import eventlet.patcher return eventlet.patcher.is_monkey_patched('socket') +def _is_gevent_monkey_patched(): + if 'gevent.monkey' not in sys.modules: + return False + import gevent.socket + return socket.socket is gevent.socket.socket + # default to gevent when we are monkey patched with gevent, eventlet when # monkey patched with eventlet, otherwise if libev is available, use that as # the default because it's fastest. Otherwise, use asyncore. 
-if 'gevent.monkey' in sys.modules: +if _is_gevent_monkey_patched(): from cassandra.io.geventreactor import GeventConnection as DefaultConnection elif _is_eventlet_monkey_patched(): from cassandra.io.eventletreactor import EventletConnection as DefaultConnection From c4bfe375bab678210cd3826c3eb7e471b81a79c1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 14 May 2015 10:25:08 -0500 Subject: [PATCH 1422/3726] Warn when UDT registered, but protocol_version < 3 PYTHON-305 --- cassandra/cluster.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 56e34e97dd..0c640988eb 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -624,6 +624,11 @@ def __init__(self, street, zipcode): print row.id, row.location.street, row.location.zipcode """ + if self.protocol_version < 3: + log.warning("User Type serialization is only supported in native protocol version 3+ (%d in use). " + "CQL encoding for simple statements will still work, but named tuples will " + "be returned when reading type %s.%s.", self.protocol_version, keyspace, user_type) + self._user_types[keyspace][user_type] = klass for session in self.sessions: session.user_type_registered(keyspace, user_type, klass) From 7718544a626c7188725c1738f80ed06e48cbdb31 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 14 May 2015 11:05:18 -0500 Subject: [PATCH 1423/3726] Deprecate Cluster.refresh_schema and submit_refresh_schema PYTHON-291 Use Cluster.refresh_*_metadata instead --- cassandra/cluster.py | 57 ++++++++++++++++++++++++++++++++++ docs/api/cassandra/cluster.rst | 8 +++++ 2 files changed, 65 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 56e34e97dd..c48a1ebffd 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1131,6 +1131,9 @@ def _ensure_core_connections(self): def refresh_schema(self, keyspace=None, table=None, usertype=None, max_schema_agreement_wait=None): """ + .. 
deprecated:: 2.6.0 + Use refresh_*_metadata instead + Synchronously refresh the schema metadata. By default, the timeout for this operation is governed by :attr:`~.Cluster.max_schema_agreement_wait` @@ -1142,18 +1145,72 @@ def refresh_schema(self, keyspace=None, table=None, usertype=None, max_schema_ag An Exception is raised if schema refresh fails for any reason. """ + msg = "refresh_schema is deprecated. Use Cluster.refresh_*_metadata instead." + warnings.warn(msg, DeprecationWarning) + log.warning(msg) if not self.control_connection.refresh_schema(keyspace, table, usertype, max_schema_agreement_wait): raise Exception("Schema was not refreshed. See log for details.") def submit_schema_refresh(self, keyspace=None, table=None, usertype=None): """ + .. deprecated:: 2.6.0 + Use refresh_*_metadata instead + Schedule a refresh of the internal representation of the current schema for this cluster. If `keyspace` is specified, only that keyspace will be refreshed, and likewise for `table`. """ + msg = "submit_schema_refresh is deprecated. Use Cluster.refresh_*_metadata instead." + warnings.warn(msg, DeprecationWarning) + log.warning(msg) return self.executor.submit( self.control_connection.refresh_schema, keyspace, table, usertype) + def refresh_schema_metadata(self, max_schema_agreement_wait=None): + """ + Synchronously refresh all schema metadata. + + By default, the timeout for this operation is governed by :attr:`~.Cluster.max_schema_agreement_wait` + and :attr:`~.Cluster.control_connection_timeout`. + + Passing max_schema_agreement_wait here overrides :attr:`~.Cluster.max_schema_agreement_wait`. + + Setting max_schema_agreement_wait <= 0 will bypass schema agreement and refresh schema immediately. + + An Exception is raised if schema refresh fails for any reason. + """ + if not self.control_connection.refresh_schema(schema_agreement_wait=max_schema_agreement_wait): + raise Exception("Schema metadata was not refreshed. 
See log for details.") + + def refresh_keyspace_metadata(self, keyspace, max_schema_agreement_wait=None): + """ + Synchronously refresh keyspace metadata. This applies to keyspace-level information such as replication + and durability settings. It does not refresh tables, types, etc. contained in the keyspace. + + See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior + """ + if not self.control_connection.refresh_schema(keyspace, schema_agreement_wait=max_schema_agreement_wait): + raise Exception("Keyspace metadata was not refreshed. See log for details.") + + def refresh_table_metadata(self, keyspace, table, max_schema_agreement_wait=None): + """ + Synchronously refresh table metadata. This applies to a table, and any triggers or indexes attached + to the table. + + See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior + """ + if not self.control_connection.refresh_schema(keyspace, table, schema_agreement_wait=max_schema_agreement_wait): + raise Exception("Table metadata was not refreshed. See log for details.") + + def refresh_type_metadata(self, keyspace, user_type, max_schema_agreement_wait=None): + """ + Synchronously refresh user defined type metadata. + + See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior + """ + if not self.control_connection.refresh_schema(keyspace, usertype=user_type, schema_agreement_wait=max_schema_agreement_wait): + raise Exception("User Type metadata was not refreshed. See log for details.") + def refresh_nodes(self): """ Synchronously refresh the node list and token metadata diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 6532c9a026..886d07160e 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -65,6 +65,14 @@ .. automethod:: set_max_connections_per_host + .. automethod:: refresh_schema_metadata + + .. 
automethod:: refresh_keyspace_metadata + + .. automethod:: refresh_table_metadata + + .. automethod:: refresh_type_metadata + .. automethod:: refresh_schema .. automethod:: refresh_nodes From 12bb183d367bd2d96b8e52639246effefa5712d2 Mon Sep 17 00:00:00 2001 From: blerer Date: Tue, 12 May 2015 10:18:09 +0200 Subject: [PATCH 1424/3726] Add support for byte and smallint types --- cassandra/cqltypes.py | 25 ++++++++++++++++++++++++- cassandra/protocol.py | 4 +++- tests/integration/datatype_utils.py | 8 ++++++++ tests/unit/test_marshalling.py | 6 +++++- tests/unit/test_types.py | 6 +++++- 5 files changed, 45 insertions(+), 4 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 5b95d4c04b..48cbc198c1 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -44,7 +44,7 @@ import warnings -from cassandra.marshal import (int8_pack, int8_unpack, +from cassandra.marshal import (int8_pack, int8_unpack, int16_pack, int16_unpack, uint16_pack, uint16_unpack, uint32_pack, uint32_unpack, int32_pack, int32_unpack, int64_pack, int64_unpack, float_pack, float_unpack, double_pack, double_unpack, @@ -424,6 +424,17 @@ def deserialize(byts, protocol_version): def serialize(truth, protocol_version): return int8_pack(truth) +class TinyIntType(_CassandraType): + typename = 'tinyint' + + @staticmethod + def deserialize(byts, protocol_version): + return int8_unpack(byts) + + @staticmethod + def serialize(byts, protocol_version): + return int8_pack(byts) + if six.PY2: class AsciiType(_CassandraType): @@ -650,6 +661,18 @@ def deserialize(byts, protocol_version): return util.Date(days) +class SmallIntType(_CassandraType): + typename = 'smallint' + + @staticmethod + def deserialize(byts, protocol_version): + return int16_unpack(byts) + + @staticmethod + def serialize(byts, protocol_version): + return int16_pack(byts) + + class TimeType(_CassandraType): typename = 'time' diff --git a/cassandra/protocol.py b/cassandra/protocol.py index bd929dad52..031501bee0 100644 
--- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -34,7 +34,7 @@ LongType, MapType, SetType, TimeUUIDType, UTF8Type, UUIDType, UserType, TupleType, lookup_casstype, SimpleDateType, - TimeType) + TimeType, TinyIntType, SmallIntType) from cassandra.policies import WriteType log = logging.getLogger(__name__) @@ -533,6 +533,8 @@ class ResultMessage(_MessageType): 0x0010: InetAddressType, 0x0011: SimpleDateType, 0x0012: TimeType, + 0x0013: SmallIntType, + 0x0014: TinyIntType, 0x0020: ListType, 0x0021: MapType, 0x0022: SetType, diff --git a/tests/integration/datatype_utils.py b/tests/integration/datatype_utils.py index fb437d7539..a7d8aeb1f8 100644 --- a/tests/integration/datatype_utils.py +++ b/tests/integration/datatype_utils.py @@ -60,6 +60,8 @@ def update_datatypes(): if _cass_version >= (3, 0, 0): PRIMITIVE_DATATYPES.append('date') PRIMITIVE_DATATYPES.append('time') + PRIMITIVE_DATATYPES.append('tinyint') + PRIMITIVE_DATATYPES.append('smallint') def get_sample_data(): @@ -117,6 +119,12 @@ def get_sample_data(): elif datatype == 'time': sample_data[datatype] = time(16, 47, 25, 7) + elif datatype == 'tinyint': + sample_data[datatype] = 123 + + elif datatype == 'smallint': + sample_data[datatype] = 32523 + else: raise Exception("Missing handling of {0}".format(datatype)) diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index bb9a406882..eeefb0f174 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -81,7 +81,11 @@ (b'\x00\x01\x00\x10\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0', 'ListType(TimeUUIDType)', [UUID(bytes=b'\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0')]), (b'\x80\x00\x00\x01', 'SimpleDateType', Date(1)), (b'\x7f\xff\xff\xff', 'SimpleDateType', Date('1969-12-31')), - (b'\x00\x00\x00\x00\x00\x00\x00\x01', 'TimeType', Time(1)) + (b'\x00\x00\x00\x00\x00\x00\x00\x01', 'TimeType', Time(1)), + (b'\x7f', 'TinyIntType', 127), + (b'\xff\xff\xff\x80', 'TinyIntType', -128), + (b'\xff\xff\x80\x00', 
'SmallIntType', 32767), + (b'\xff\xff\x80\x00', 'SmallIntType', -32768) ) ordered_map_value = OrderedMapSerializedKey(UTF8Type, 2) diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index bc0eac2da0..4386380359 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -26,7 +26,7 @@ from cassandra.cqltypes import (BooleanType, lookup_casstype_simple, lookup_casstype, LongType, DecimalType, SetType, cql_typename, CassandraType, UTF8Type, parse_casstype_args, - SimpleDateType, TimeType, + SimpleDateType, TimeType, TinyIntType, SmallIntType, EmptyValue, _CassandraType, DateType, int64_pack) from cassandra.encoder import cql_quote from cassandra.protocol import (write_string, read_longstring, write_stringmap, @@ -55,6 +55,8 @@ def test_lookup_casstype_simple(self): self.assertEqual(lookup_casstype_simple('UTF8Type'), cassandra.cqltypes.UTF8Type) self.assertEqual(lookup_casstype_simple('DateType'), cassandra.cqltypes.DateType) self.assertEqual(lookup_casstype_simple('SimpleDateType'), cassandra.cqltypes.SimpleDateType) + self.assertEqual(lookup_casstype_simple('TinyIntType'), cassandra.cqltypes.TinyIntType) + self.assertEqual(lookup_casstype_simple('SmallIntType'), cassandra.cqltypes.SmallIntType) self.assertEqual(lookup_casstype_simple('TimeUUIDType'), cassandra.cqltypes.TimeUUIDType) self.assertEqual(lookup_casstype_simple('TimeType'), cassandra.cqltypes.TimeType) self.assertEqual(lookup_casstype_simple('UUIDType'), cassandra.cqltypes.UUIDType) @@ -87,6 +89,8 @@ def test_lookup_casstype(self): self.assertEqual(lookup_casstype('UTF8Type'), cassandra.cqltypes.UTF8Type) self.assertEqual(lookup_casstype('DateType'), cassandra.cqltypes.DateType) self.assertEqual(lookup_casstype('TimeType'), cassandra.cqltypes.TimeType) + self.assertEqual(lookup_casstype('TinyIntType'), cassandra.cqltypes.TinyIntType) + self.assertEqual(lookup_casstype('SmallIntType'), cassandra.cqltypes.SmallIntType) self.assertEqual(lookup_casstype('TimeUUIDType'), 
cassandra.cqltypes.TimeUUIDType) self.assertEqual(lookup_casstype('UUIDType'), cassandra.cqltypes.UUIDType) self.assertEqual(lookup_casstype('IntegerType'), cassandra.cqltypes.IntegerType) From dfa91b8bd5d6dcfe13ce3f4caf4eb721a7dd3d18 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 15 May 2015 14:46:06 -0500 Subject: [PATCH 1425/3726] Revert "Merge pull request #298 from datastax/PYTHON-108" Taking out timers in the interest of accelerating C* 2.2 features for a release. This reverts commit f879700dee6045b0c748a233253cf06bcce625e1, reversing changes made to fe4d2eaf2dd34809d823ab319a0cf82e4bd0bfa6. --- cassandra/cluster.py | 93 +++++++++----------- cassandra/connection.py | 106 ++--------------------- cassandra/io/asyncorereactor.py | 64 +++++++++----- cassandra/io/eventletreactor.py | 53 +++++------- cassandra/io/geventreactor.py | 56 +++++------- cassandra/io/libevreactor.py | 60 ++++++------- cassandra/io/libevwrapper.c | 133 ----------------------------- cassandra/io/twistedreactor.py | 62 ++++++-------- cassandra/query.py | 6 +- docs/object_mapper.rst | 2 +- tests/unit/test_connection.py | 3 + tests/unit/test_response_future.py | 22 ++--- 12 files changed, 201 insertions(+), 459 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 56e34e97dd..8f41948681 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -27,7 +27,6 @@ import sys import time from threading import Lock, RLock, Thread, Event -import warnings import six from six.moves import range @@ -72,7 +71,6 @@ BatchStatement, bind_params, QueryTrace, Statement, named_tuple_factory, dict_factory, FETCH_SIZE_UNSET) - def _is_eventlet_monkey_patched(): if 'eventlet.patcher' not in sys.modules: return False @@ -1267,7 +1265,8 @@ class Session(object): """ A default timeout, measured in seconds, for queries executed through :meth:`.execute()` or :meth:`.execute_async()`. This default may be - overridden with the `timeout` parameter for either of those methods. 
+ overridden with the `timeout` parameter for either of those methods + or the `timeout` parameter for :meth:`.ResponseFuture.result()`. Setting this to :const:`None` will cause no timeouts to be set by default. @@ -1402,14 +1401,17 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False): trace details, the :attr:`~.Statement.trace` attribute will be left as :const:`None`. """ + if timeout is _NOT_SET: + timeout = self.default_timeout + if trace and not isinstance(query, Statement): raise TypeError( "The query argument must be an instance of a subclass of " "cassandra.query.Statement when trace=True") - future = self.execute_async(query, parameters, trace, timeout) + future = self.execute_async(query, parameters, trace) try: - result = future.result() + result = future.result(timeout) finally: if trace: try: @@ -1419,7 +1421,7 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False): return result - def execute_async(self, query, parameters=None, trace=False, timeout=_NOT_SET): + def execute_async(self, query, parameters=None, trace=False): """ Execute the given query and return a :class:`~.ResponseFuture` object which callbacks may be attached to for asynchronous response @@ -1456,14 +1458,11 @@ def execute_async(self, query, parameters=None, trace=False, timeout=_NOT_SET): ... 
log.exception("Operation failed:") """ - if timeout is _NOT_SET: - timeout = self.default_timeout - - future = self._create_response_future(query, parameters, trace, timeout) + future = self._create_response_future(query, parameters, trace) future.send_request() return future - def _create_response_future(self, query, parameters, trace, timeout): + def _create_response_future(self, query, parameters, trace): """ Returns the ResponseFuture before calling send_request() on it """ prepared_statement = None @@ -1514,7 +1513,7 @@ def _create_response_future(self, query, parameters, trace, timeout): message.tracing = True return ResponseFuture( - self, message, query, timeout, metrics=self._metrics, + self, message, query, self.default_timeout, metrics=self._metrics, prepared_statement=prepared_statement) def prepare(self, query): @@ -1544,10 +1543,10 @@ def prepare(self, query): Preparing the same query more than once will likely affect performance. """ message = PrepareMessage(query=query) - future = ResponseFuture(self, message, query=None, timeout=self.default_timeout) + future = ResponseFuture(self, message, query=None) try: future.send_request() - query_id, column_metadata = future.result() + query_id, column_metadata = future.result(self.default_timeout) except Exception: log.exception("Error preparing query:") raise @@ -1572,7 +1571,7 @@ def prepare_on_all_hosts(self, query, excluded_host): futures = [] for host in self._pools.keys(): if host != excluded_host and host.is_up: - future = ResponseFuture(self, PrepareMessage(query=query), None, self.default_timeout) + future = ResponseFuture(self, PrepareMessage(query=query), None) # we don't care about errors preparing against specific hosts, # since we can always prepare them as needed when the prepared @@ -1593,7 +1592,7 @@ def prepare_on_all_hosts(self, query, excluded_host): for host, future in futures: try: - future.result() + future.result(self.default_timeout) except Exception: log.exception("Error preparing 
query for host %s:", host) @@ -2580,14 +2579,13 @@ class ResponseFuture(object): _start_time = None _metrics = None _paging_state = None - _timer = None - def __init__(self, session, message, query, timeout, metrics=None, prepared_statement=None): + def __init__(self, session, message, query, default_timeout=None, metrics=None, prepared_statement=None): self.session = session self.row_factory = session.row_factory self.message = message self.query = query - self.timeout = timeout + self.default_timeout = default_timeout self._metrics = metrics self.prepared_statement = prepared_statement self._callback_lock = Lock() @@ -2598,18 +2596,6 @@ def __init__(self, session, message, query, timeout, metrics=None, prepared_stat self._errors = {} self._callbacks = [] self._errbacks = [] - self._start_timer() - - def _start_timer(self): - if self.timeout is not None: - self._timer = self.session.cluster.connection_class.create_timer(self.timeout, self._on_timeout) - - def _cancel_timer(self): - if self._timer: - self._timer.cancel() - - def _on_timeout(self): - self._set_final_exception(OperationTimedOut(self._errors, self._current_host)) def _make_query_plan(self): # convert the list/generator/etc to an iterator so that subsequent @@ -2698,7 +2684,6 @@ def start_fetching_next_page(self): self._event.clear() self._final_result = _NOT_SET self._final_exception = None - self._start_timer() self.send_request() def _reprepare(self, prepare_message): @@ -2905,7 +2890,6 @@ def _execute_after_prepare(self, response): "statement on host %s: %s" % (self._current_host, response))) def _set_final_result(self, response): - self._cancel_timer() if self._metrics is not None: self._metrics.request_timer.addValue(time.time() - self._start_time) @@ -2920,7 +2904,6 @@ def _set_final_result(self, response): fn(response, *args, **kwargs) def _set_final_exception(self, response): - self._cancel_timer() if self._metrics is not None: self._metrics.request_timer.addValue(time.time() - 
self._start_time) @@ -2964,11 +2947,6 @@ def result(self, timeout=_NOT_SET): encountered. If the final result or error has not been set yet, this method will block until that time. - .. versionchanged:: 2.6.0 - - **`timeout` is deprecated. Use timeout in the Session execute functions instead. - The following description applies to deprecated behavior:** - You may set a timeout (in seconds) with the `timeout` parameter. By default, the :attr:`~.default_timeout` for the :class:`.Session` this was created through will be used for the timeout on this @@ -2982,6 +2960,11 @@ def result(self, timeout=_NOT_SET): This is a client-side timeout. For more information about server-side coordinator timeouts, see :class:`.policies.RetryPolicy`. + **Important**: This timeout currently has no effect on callbacks registered + on a :class:`~.ResponseFuture` through :meth:`.ResponseFuture.add_callback` or + :meth:`.ResponseFuture.add_errback`; even if a query exceeds this default + timeout, neither the registered callback or errback will be called. + Example usage:: >>> future = session.execute_async("SELECT * FROM mycf") @@ -2995,24 +2978,27 @@ def result(self, timeout=_NOT_SET): ... log.exception("Operation failed:") """ - if timeout is not _NOT_SET: - msg = "ResponseFuture.result timeout argument is deprecated. Specify the request timeout via Session.execute[_async]." 
- warnings.warn(msg, DeprecationWarning) - log.warning(msg) - else: - timeout = None + if timeout is _NOT_SET: + timeout = self.default_timeout - self._event.wait(timeout) - # TODO: remove this conditional when deprecated timeout parameter is removed - if not self._event.is_set(): - self._on_timeout() if self._final_result is not _NOT_SET: if self._paging_state is None: return self._final_result else: - return PagedResult(self, self._final_result) - else: + return PagedResult(self, self._final_result, timeout) + elif self._final_exception: raise self._final_exception + else: + self._event.wait(timeout=timeout) + if self._final_result is not _NOT_SET: + if self._paging_state is None: + return self._final_result + else: + return PagedResult(self, self._final_result, timeout) + elif self._final_exception: + raise self._final_exception + else: + raise OperationTimedOut(errors=self._errors, last_host=self._current_host) def get_query_trace(self, max_wait=None): """ @@ -3162,9 +3148,10 @@ class will be returned. 
response_future = None - def __init__(self, response_future, initial_response): + def __init__(self, response_future, initial_response, timeout=_NOT_SET): self.response_future = response_future self.current_response = iter(initial_response) + self.timeout = timeout def __iter__(self): return self @@ -3177,7 +3164,7 @@ def next(self): raise self.response_future.start_fetching_next_page() - result = self.response_future.result() + result = self.response_future.result(self.timeout) if self.response_future.has_more_pages: self.current_response = result.current_response else: diff --git a/cassandra/connection.py b/cassandra/connection.py index 0d0c024b0d..c239313c45 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -16,12 +16,11 @@ from collections import defaultdict, deque import errno from functools import wraps, partial -from heapq import heappush, heappop import io import logging import os import sys -from threading import Thread, Event, RLock, Lock +from threading import Thread, Event, RLock import time if 'gevent.monkey' in sys.modules: @@ -39,8 +38,7 @@ QueryMessage, ResultMessage, decode_response, InvalidRequestException, SupportedMessage, AuthResponseMessage, AuthChallengeMessage, - AuthSuccessMessage, ProtocolException, - RegisterMessage) + AuthSuccessMessage, ProtocolException) from cassandra.util import OrderedDict @@ -194,7 +192,6 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self.is_control_connection = is_control_connection self.user_type_map = user_type_map self._push_watchers = defaultdict(set) - self._callbacks = {} self._iobuf = io.BytesIO() if protocol_version >= 3: self._header_unpack = v3_header_unpack @@ -225,7 +222,6 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self._full_header_length = self._header_length + 4 self.lock = RLock() - self.connected_event = Event() @classmethod def initialize_reactor(self): @@ -260,10 +256,6 @@ def factory(cls, host, timeout, *args, **kwargs): 
else: return conn - @classmethod - def create_timer(cls, timeout, callback): - raise NotImplementedError() - def close(self): raise NotImplementedError() @@ -375,24 +367,11 @@ def wait_for_responses(self, *msgs, **kwargs): self.defunct(exc) raise - def register_watcher(self, event_type, callback, register_timeout=None): - """ - Register a callback for a given event type. - """ - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=[event_type]), - timeout=register_timeout) + def register_watcher(self, event_type, callback): + raise NotImplementedError() - def register_watchers(self, type_callback_dict, register_timeout=None): - """ - Register multiple callback/event type pairs, expressed as a dict. - """ - for event_type, callback in type_callback_dict.items(): - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=type_callback_dict.keys()), - timeout=register_timeout) + def register_watchers(self, type_callback_dict): + raise NotImplementedError() def control_conn_disposed(self): self.is_control_connection = False @@ -892,76 +871,3 @@ def stop(self): def _raise_if_stopped(self): if self._shutdown_event.is_set(): raise self.ShutdownException() - - -class Timer(object): - - canceled = False - - def __init__(self, timeout, callback): - self.end = time.time() + timeout - self.callback = callback - if timeout < 0: - self.callback() - - def cancel(self): - self.canceled = True - - def finish(self, time_now): - if self.canceled: - return True - - if time_now >= self.end: - self.callback() - return True - - return False - - -class TimerManager(object): - - def __init__(self): - self._queue = [] - self._new_timers = [] - - def add_timer(self, timer): - """ - called from client thread with a Timer object - """ - self._new_timers.append((timer.end, timer)) - - def service_timeouts(self): - """ - run callbacks on all expired timers - Called from the event thread - :return: next 
end time, or None - """ - queue = self._queue - new_timers = self._new_timers - while self._new_timers: - heappush(queue, new_timers.pop()) - now = time.time() - while queue: - try: - timer = queue[0][1] - if timer.finish(now): - heappop(queue) - else: - return timer.end - except Exception: - log.exception("Exception while servicing timeout callback: ") - - @property - def next_timeout(self): - try: - return self._queue[0][0] - except IndexError: - pass - - @property - def next_offset(self): - try: - next_end = self._queue[0][0] - return next_end - time.time() - except IndexError: - pass diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index b815d20aed..ef687c388c 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -19,12 +19,11 @@ import socket import sys from threading import Event, Lock, Thread -import time import weakref from six.moves import range -from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL, EISCONN +from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL, EISCONN, errorcode try: from weakref import WeakSet except ImportError: @@ -37,9 +36,10 @@ except ImportError: ssl = None # NOQA +from cassandra import OperationTimedOut from cassandra.connection import (Connection, ConnectionShutdown, - ConnectionException, NONBLOCKING, - Timer, TimerManager) + ConnectionException, NONBLOCKING) +from cassandra.protocol import RegisterMessage log = logging.getLogger(__name__) @@ -55,17 +55,15 @@ def _cleanup(loop_weakref): class AsyncoreLoop(object): - def __init__(self): self._pid = os.getpid() self._loop_lock = Lock() self._started = False self._shutdown = False + self._conns_lock = Lock() + self._conns = WeakSet() self._thread = None - - self._timers = TimerManager() - atexit.register(partial(_cleanup, weakref.ref(self))) def maybe_start(self): @@ -88,22 +86,24 @@ def maybe_start(self): def _run_loop(self): log.debug("Starting asyncore event loop") with self._loop_lock: - while not 
self._shutdown: + while True: try: - asyncore.loop(timeout=0.001, use_poll=True, count=100) - self._timers.service_timeouts() - if not asyncore.socket_map: - time.sleep(0.005) + asyncore.loop(timeout=0.001, use_poll=True, count=1000) except Exception: log.debug("Asyncore event loop stopped unexepectedly", exc_info=True) break + + if self._shutdown: + break + + with self._conns_lock: + if len(self._conns) == 0: + break + self._started = False log.debug("Asyncore event loop ended") - def add_timer(self, timer): - self._timers.add_timer(timer) - def _cleanup(self): self._shutdown = True if not self._thread: @@ -118,6 +118,14 @@ def _cleanup(self): log.debug("Event loop thread was joined") + def connection_created(self, connection): + with self._conns_lock: + self._conns.add(connection) + + def connection_destroyed(self, connection): + with self._conns_lock: + self._conns.discard(connection) + class AsyncoreConnection(Connection, asyncore.dispatcher): """ @@ -148,19 +156,18 @@ def handle_fork(cls): cls._loop._cleanup() cls._loop = None - @classmethod - def create_timer(cls, timeout, callback): - timer = Timer(timeout, callback) - cls._loop.add_timer(timer) - return timer - def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) asyncore.dispatcher.__init__(self) + self.connected_event = Event() + + self._callbacks = {} self.deque = deque() self.deque_lock = Lock() + self._loop.connection_created(self) + sockerr = None addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM) for (af, socktype, proto, canonname, sockaddr) in addresses: @@ -233,6 +240,8 @@ def close(self): asyncore.dispatcher.close(self) log.debug("Closed socket to %s", self.host) + self._loop.connection_destroyed(self) + if not self.is_defunct: self.error_all_callbacks( ConnectionShutdown("Connection to %s was closed" % self.host)) @@ -314,3 +323,14 @@ def writable(self): def readable(self): return self._readable or (self.is_control_connection and 
not (self.is_defunct or self.is_closed)) + + def register_watcher(self, event_type, callback, register_timeout=None): + self._push_watchers[event_type].add(callback) + self.wait_for_response( + RegisterMessage(event_list=[event_type]), timeout=register_timeout) + + def register_watchers(self, type_callback_dict, register_timeout=None): + for event_type, callback in type_callback_dict.items(): + self._push_watchers[event_type].add(callback) + self.wait_for_response( + RegisterMessage(event_list=type_callback_dict.keys()), timeout=register_timeout) diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index aceac55e10..670d0f1865 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -16,6 +16,7 @@ # Originally derived from MagnetoDB source: # https://github.com/stackforge/magnetodb/blob/2015.1.0b1/magnetodb/common/cassandra/io/eventletreactor.py +from collections import defaultdict from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL import eventlet from eventlet.green import select, socket @@ -24,11 +25,12 @@ import logging import os from threading import Event -import time from six.moves import xrange -from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager +from cassandra import OperationTimedOut +from cassandra.connection import Connection, ConnectionShutdown +from cassandra.protocol import RegisterMessage log = logging.getLogger(__name__) @@ -51,45 +53,19 @@ class EventletConnection(Connection): _write_watcher = None _socket = None - _timers = None - _timeout_watcher = None - _new_timer = None - @classmethod def initialize_reactor(cls): eventlet.monkey_patch() - if not cls._timers: - cls._timers = TimerManager() - cls._timeout_watcher = eventlet.spawn(cls.service_timeouts) - cls._new_timer = Event() - - @classmethod - def create_timer(cls, timeout, callback): - timer = Timer(timeout, callback) - cls._timers.add_timer(timer) - cls._new_timer.set() - return timer - - 
@classmethod - def service_timeouts(cls): - """ - cls._timeout_watcher runs in this loop forever. - It is usually waiting for the next timeout on the cls._new_timer Event. - When new timers are added, that event is set so that the watcher can - wake up and possibly set an earlier timeout. - """ - timer_manager = cls._timers - while True: - next_end = timer_manager.service_timeouts() - sleep_time = max(next_end - time.time(), 0) if next_end else 10000 - cls._new_timer.wait(sleep_time) - cls._new_timer.clear() def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) + self.connected_event = Event() self._write_queue = Queue() + self._callbacks = {} + self._push_watchers = defaultdict(set) + sockerr = None addresses = socket.getaddrinfo( self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM @@ -189,3 +165,16 @@ def push(self, data): chunk_size = self.out_buffer_size for i in xrange(0, len(data), chunk_size): self._write_queue.put(data[i:i + chunk_size]) + + def register_watcher(self, event_type, callback, register_timeout=None): + self._push_watchers[event_type].add(callback) + self.wait_for_response( + RegisterMessage(event_list=[event_type]), + timeout=register_timeout) + + def register_watchers(self, type_callback_dict, register_timeout=None): + for event_type, callback in type_callback_dict.items(): + self._push_watchers[event_type].add(callback) + self.wait_for_response( + RegisterMessage(event_list=type_callback_dict.keys()), + timeout=register_timeout) diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 84285957bb..6e9af0da4d 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -13,20 +13,21 @@ # limitations under the License. 
import gevent from gevent import select, socket, ssl -import gevent.event +from gevent.event import Event from gevent.queue import Queue from collections import defaultdict from functools import partial import logging import os -import time -from six.moves import range +from six.moves import xrange from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL -from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager +from cassandra import OperationTimedOut +from cassandra.connection import Connection, ConnectionShutdown +from cassandra.protocol import RegisterMessage log = logging.getLogger(__name__) @@ -49,39 +50,15 @@ class GeventConnection(Connection): _write_watcher = None _socket = None - _timers = None - _timeout_watcher = None - _new_timer = None - - @classmethod - def initialize_reactor(cls): - if not cls._timers: - cls._timers = TimerManager() - cls._timeout_watcher = gevent.spawn(cls.service_timeouts) - cls._new_timer = gevent.event.Event() - - @classmethod - def create_timer(cls, timeout, callback): - timer = Timer(timeout, callback) - cls._timers.add_timer(timer) - cls._new_timer.set() - return timer - - @classmethod - def service_timeouts(cls): - timer_manager = cls._timers - timer_event = cls._new_timer - while True: - next_end = timer_manager.service_timeouts() - sleep_time = max(next_end - time.time(), 0) if next_end else 10000 - timer_event.wait(sleep_time) - timer_event.clear() - def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) + self.connected_event = Event() self._write_queue = Queue() + self._callbacks = {} + self._push_watchers = defaultdict(set) + sockerr = None addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM) for (af, socktype, proto, canonname, sockaddr) in addresses: @@ -182,5 +159,18 @@ def handle_read(self): def push(self, data): chunk_size = self.out_buffer_size - for i in range(0, len(data), chunk_size): + for i in xrange(0, len(data), 
chunk_size): self._write_queue.put(data[i:i + chunk_size]) + + def register_watcher(self, event_type, callback, register_timeout=None): + self._push_watchers[event_type].add(callback) + self.wait_for_response( + RegisterMessage(event_list=[event_type]), + timeout=register_timeout) + + def register_watchers(self, type_callback_dict, register_timeout=None): + for event_type, callback in type_callback_dict.items(): + self._push_watchers[event_type].add(callback) + self.wait_for_response( + RegisterMessage(event_list=type_callback_dict.keys()), + timeout=register_timeout) diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index e6abb76b41..93b4c97854 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -20,10 +20,11 @@ from threading import Event, Lock, Thread import weakref -from six.moves import range +from six.moves import xrange -from cassandra.connection import (Connection, ConnectionShutdown, - NONBLOCKING, Timer, TimerManager) +from cassandra import OperationTimedOut +from cassandra.connection import Connection, ConnectionShutdown, NONBLOCKING +from cassandra.protocol import RegisterMessage try: import cassandra.io.libevwrapper as libev except ImportError: @@ -39,7 +40,7 @@ try: import ssl except ImportError: - ssl = None # NOQA + ssl = None # NOQA log = logging.getLogger(__name__) @@ -49,6 +50,7 @@ def _cleanup(loop_weakref): loop = loop_weakref() except ReferenceError: return + loop._cleanup() @@ -83,11 +85,11 @@ def __init__(self): self._loop.unref() self._preparer.start() - self._timers = TimerManager() - self._loop_timer = libev.Timer(self._loop, self._on_loop_timer) - atexit.register(partial(_cleanup, weakref.ref(self))) + def notify(self): + self._notifier.send() + def maybe_start(self): should_start = False with self._lock: @@ -131,7 +133,6 @@ def _cleanup(self): conn._read_watcher.stop() del conn._read_watcher - self.notify() # wake the timer watcher log.debug("Waiting for event loop thread to join...") 
self._thread.join(timeout=1.0) if self._thread.is_alive(): @@ -142,24 +143,6 @@ def _cleanup(self): log.debug("Event loop thread was joined") self._loop = None - def add_timer(self, timer): - self._timers.add_timer(timer) - self._notifier.send() # wake up in case this timer is earlier - - def _update_timer(self): - if not self._shutdown: - self._timers.service_timeouts() - offset = self._timers.next_offset or 100000 # none pending; will be updated again when something new happens - self._loop_timer.start(offset) - else: - self._loop_timer.stop() - - def _on_loop_timer(self): - self._timers.service_timeouts() - - def notify(self): - self._notifier.send() - def connection_created(self, conn): with self._conn_set_lock: new_live_conns = self._live_conns.copy() @@ -222,9 +205,6 @@ def _loop_will_run(self, prepare): changed = True - # TODO: update to do connection management, timer updates through dedicaterd async 'notifier' callbacks - self._update_timer() - if changed: self._notifier.send() @@ -256,15 +236,12 @@ def handle_fork(cls): cls._libevloop._cleanup() cls._libevloop = None - @classmethod - def create_timer(cls, timeout, callback): - timer = Timer(timeout, callback) - cls._libevloop.add_timer(timer) - return timer - def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) + self.connected_event = Event() + + self._callbacks = {} self.deque = deque() self._deque_lock = Lock() @@ -384,7 +361,7 @@ def push(self, data): sabs = self.out_buffer_size if len(data) > sabs: chunks = [] - for i in range(0, len(data), sabs): + for i in xrange(0, len(data), sabs): chunks.append(data[i:i + sabs]) else: chunks = [data] @@ -392,3 +369,14 @@ def push(self, data): with self._deque_lock: self.deque.extend(chunks) self._libevloop.notify() + + def register_watcher(self, event_type, callback, register_timeout=None): + self._push_watchers[event_type].add(callback) + self.wait_for_response( + RegisterMessage(event_list=[event_type]), timeout=register_timeout) + + 
def register_watchers(self, type_callback_dict, register_timeout=None): + for event_type, callback in type_callback_dict.items(): + self._push_watchers[event_type].add(callback) + self.wait_for_response( + RegisterMessage(event_list=type_callback_dict.keys()), timeout=register_timeout) diff --git a/cassandra/io/libevwrapper.c b/cassandra/io/libevwrapper.c index 99e1df30f7..cbac83b277 100644 --- a/cassandra/io/libevwrapper.c +++ b/cassandra/io/libevwrapper.c @@ -451,131 +451,6 @@ static PyTypeObject libevwrapper_PrepareType = { (initproc)Prepare_init, /* tp_init */ }; -typedef struct libevwrapper_Timer { - PyObject_HEAD - struct ev_timer timer; - struct libevwrapper_Loop *loop; - PyObject *callback; -} libevwrapper_Timer; - -static void -Timer_dealloc(libevwrapper_Timer *self) { - Py_XDECREF(self->loop); - Py_XDECREF(self->callback); - Py_TYPE(self)->tp_free((PyObject *)self); -} - -static void timer_callback(struct ev_loop *loop, ev_timer *watcher, int revents) { - libevwrapper_Timer *self = watcher->data; - - PyObject *result = NULL; - PyGILState_STATE gstate; - - gstate = PyGILState_Ensure(); - result = PyObject_CallFunction(self->callback, NULL); - if (!result) { - PyErr_WriteUnraisable(self->callback); - } - Py_XDECREF(result); - - PyGILState_Release(gstate); -} - -static int -Timer_init(libevwrapper_Timer *self, PyObject *args, PyObject *kwds) { - PyObject *callback; - PyObject *loop; - - if (!PyArg_ParseTuple(args, "OO", &loop, &callback)) { - return -1; - } - - if (loop) { - Py_INCREF(loop); - self->loop = (libevwrapper_Loop *)loop; - } else { - return -1; - } - - if (callback) { - if (!PyCallable_Check(callback)) { - PyErr_SetString(PyExc_TypeError, "callback parameter must be callable"); - Py_XDECREF(loop); - return -1; - } - Py_INCREF(callback); - self->callback = callback; - } - ev_init(&self->timer, timer_callback); - self->timer.data = self; - return 0; -} - -static PyObject * -Timer_start(libevwrapper_Timer *self, PyObject *args) { - double timeout; - 
if (!PyArg_ParseTuple(args, "d", &timeout)) { - return NULL; - } - /* some tiny non-zero number to avoid zero, and - make it run immediately for negative timeouts */ - self->timer.repeat = fmax(timeout, 0.000000001); - ev_timer_again(self->loop->loop, &self->timer); - Py_RETURN_NONE; -} - -static PyObject * -Timer_stop(libevwrapper_Timer *self, PyObject *args) { - ev_timer_stop(self->loop->loop, &self->timer); - Py_RETURN_NONE; -} - -static PyMethodDef Timer_methods[] = { - {"start", (PyCFunction)Timer_start, METH_VARARGS, "Start the Timer watcher"}, - {"stop", (PyCFunction)Timer_stop, METH_NOARGS, "Stop the Timer watcher"}, - {NULL} /* Sentinal */ -}; - -static PyTypeObject libevwrapper_TimerType = { - PyVarObject_HEAD_INIT(NULL, 0) - "cassandra.io.libevwrapper.Timer", /*tp_name*/ - sizeof(libevwrapper_Timer), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - (destructor)Timer_dealloc, /*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - 0, /*tp_compare*/ - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash */ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/ - "Timer objects", /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - Timer_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)Timer_init, /* tp_init */ -}; - - static PyMethodDef module_methods[] = { {NULL} /* Sentinal */ }; @@ -625,10 +500,6 @@ initlibevwrapper(void) if (PyType_Ready(&libevwrapper_AsyncType) < 0) INITERROR; - libevwrapper_TimerType.tp_new = PyType_GenericNew; - if (PyType_Ready(&libevwrapper_TimerType) < 0) - INITERROR; - # if PY_MAJOR_VERSION >= 3 module = PyModule_Create(&moduledef); # 
else @@ -661,10 +532,6 @@ initlibevwrapper(void) if (PyModule_AddObject(module, "Async", (PyObject *)&libevwrapper_AsyncType) == -1) INITERROR; - Py_INCREF(&libevwrapper_TimerType); - if (PyModule_AddObject(module, "Timer", (PyObject *)&libevwrapper_TimerType) == -1) - INITERROR; - if (!PyEval_ThreadsInitialized()) { PyEval_InitThreads(); } diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index 0f5c841c75..ff81e5613f 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -15,15 +15,16 @@ Module that implements an event loop based on twisted ( https://twistedmatrix.com ). """ -import atexit +from twisted.internet import reactor, protocol +from threading import Event, Thread, Lock from functools import partial import logging -from threading import Event, Thread, Lock -import time -from twisted.internet import reactor, protocol import weakref +import atexit -from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager +from cassandra import OperationTimedOut +from cassandra.connection import Connection, ConnectionShutdown +from cassandra.protocol import RegisterMessage log = logging.getLogger(__name__) @@ -108,12 +109,9 @@ class TwistedLoop(object): _lock = None _thread = None - _timeout_task = None - _timeout = None def __init__(self): self._lock = Lock() - self._timers = TimerManager() def maybe_start(self): with self._lock: @@ -135,27 +133,6 @@ def _cleanup(self): "Cluster.shutdown() to avoid this.") log.debug("Event loop thread was joined") - def add_timer(self, timer): - self._timers.add_timer(timer) - # callFromThread to schedule from the loop thread, where - # the timeout task can safely be modified - reactor.callFromThread(self._schedule_timeout, timer.end) - - def _schedule_timeout(self, next_timeout): - if next_timeout: - delay = max(next_timeout - time.time(), 0) - if self._timeout_task and self._timeout_task.active(): - if next_timeout < self._timeout: - 
self._timeout_task.reset(delay) - self._timeout = next_timeout - else: - self._timeout_task = reactor.callLater(delay, self._on_loop_timer) - self._timeout = next_timeout - - def _on_loop_timer(self): - self._timers.service_timeouts() - self._schedule_timeout(self._timers.next_timeout) - class TwistedConnection(Connection): """ @@ -171,12 +148,6 @@ def initialize_reactor(cls): if not cls._loop: cls._loop = TwistedLoop() - @classmethod - def create_timer(cls, timeout, callback): - timer = Timer(timeout, callback) - cls._loop.add_timer(timer) - return timer - def __init__(self, *args, **kwargs): """ Initialization method. @@ -188,9 +159,11 @@ def __init__(self, *args, **kwargs): """ Connection.__init__(self, *args, **kwargs) + self.connected_event = Event() self.is_closed = True self.connector = None + self._callbacks = {} reactor.callFromThread(self.add_connection) self._loop.maybe_start() @@ -247,3 +220,22 @@ def push(self, data): the event loop when it gets the chance. """ reactor.callFromThread(self.connector.transport.write, data) + + def register_watcher(self, event_type, callback, register_timeout=None): + """ + Register a callback for a given event type. + """ + self._push_watchers[event_type].add(callback) + self.wait_for_response( + RegisterMessage(event_list=[event_type]), + timeout=register_timeout) + + def register_watchers(self, type_callback_dict, register_timeout=None): + """ + Register multiple callback/event type pairs, expressed as a dict. 
+ """ + for event_type, callback in type_callback_dict.items(): + self._push_watchers[event_type].add(callback) + self.wait_for_response( + RegisterMessage(event_list=type_callback_dict.keys()), + timeout=register_timeout) diff --git a/cassandra/query.py b/cassandra/query.py index 30bf450117..1232514bfa 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -844,14 +844,14 @@ def populate(self, max_wait=2.0): break def _execute(self, query, parameters, time_spent, max_wait): - timeout = (max_wait - time_spent) if max_wait is not None else None - future = self._session._create_response_future(query, parameters, trace=False, timeout=timeout) # in case the user switched the row factory, set it to namedtuple for this query + future = self._session._create_response_future(query, parameters, trace=False) future.row_factory = named_tuple_factory future.send_request() + timeout = (max_wait - time_spent) if max_wait is not None else None try: - return future.result() + return future.result(timeout=timeout) except OperationTimedOut: raise TraceUnavailable("Trace information was not available within %f seconds" % (max_wait,)) diff --git a/docs/object_mapper.rst b/docs/object_mapper.rst index 4e38994064..26d78a0964 100644 --- a/docs/object_mapper.rst +++ b/docs/object_mapper.rst @@ -48,7 +48,7 @@ Getting Started from cassandra.cqlengine import columns from cassandra.cqlengine import connection from datetime import datetime - from cassandra.cqlengine.management import sync_table + from cassandra.cqlengine.management import create_keyspace, sync_table from cassandra.cqlengine.models import Model #first, define a model diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index a779e1338c..7fc4ed4e5a 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -261,7 +261,10 @@ def test_not_implemented(self): Ensure the following methods throw NIE's. If not, come back and test them. 
""" c = self.make_connection() + self.assertRaises(NotImplementedError, c.close) + self.assertRaises(NotImplementedError, c.register_watcher, None, None) + self.assertRaises(NotImplementedError, c.register_watchers, None) def test_set_keyspace_blocking(self): c = self.make_connection() diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 986945ba4a..027fe73214 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -47,7 +47,7 @@ def make_session(self): def make_response_future(self, session): query = SimpleStatement("SELECT * FROM foo") message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - return ResponseFuture(session, message, query, 1) + return ResponseFuture(session, message, query) def make_mock_response(self, results): return Mock(spec=ResultMessage, kind=RESULT_KIND_ROWS, results=results, paging_state=None) @@ -122,7 +122,7 @@ def test_read_timeout_error_message(self): query.retry_policy.on_read_timeout.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query, 1) + rf = ResponseFuture(session, message, query) rf.send_request() result = Mock(spec=ReadTimeoutErrorMessage, info={}) @@ -137,7 +137,7 @@ def test_write_timeout_error_message(self): query.retry_policy.on_write_timeout.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query, 1) + rf = ResponseFuture(session, message, query) rf.send_request() result = Mock(spec=WriteTimeoutErrorMessage, info={}) @@ -151,7 +151,7 @@ def test_unavailable_error_message(self): query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query, 1) + rf = ResponseFuture(session, 
message, query) rf.send_request() result = Mock(spec=UnavailableErrorMessage, info={}) @@ -165,7 +165,7 @@ def test_retry_policy_says_ignore(self): query.retry_policy.on_unavailable.return_value = (RetryPolicy.IGNORE, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query, 1) + rf = ResponseFuture(session, message, query) rf.send_request() result = Mock(spec=UnavailableErrorMessage, info={}) @@ -184,7 +184,7 @@ def test_retry_policy_says_retry(self): connection = Mock(spec=Connection) pool.borrow_connection.return_value = (connection, 1) - rf = ResponseFuture(session, message, query, 1) + rf = ResponseFuture(session, message, query) rf.send_request() rf.session._pools.get.assert_called_once_with('ip1') @@ -279,7 +279,7 @@ def test_all_pools_shutdown(self): session._load_balancer.make_query_plan.return_value = ['ip1', 'ip2'] session._pools.get.return_value.is_shutdown = True - rf = ResponseFuture(session, Mock(), Mock(), 1) + rf = ResponseFuture(session, Mock(), Mock()) rf.send_request() self.assertRaises(NoHostAvailable, rf.result) @@ -354,7 +354,7 @@ def test_errback(self): query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query, 1) + rf = ResponseFuture(session, message, query) rf.send_request() rf.add_errback(self.assertIsInstance, Exception) @@ -401,7 +401,7 @@ def test_multiple_errbacks(self): query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query, 1) + rf = ResponseFuture(session, message, query) rf.send_request() callback = Mock() @@ -431,7 +431,7 @@ def test_add_callbacks(self): message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) # test errback - rf = ResponseFuture(session, 
message, query, 1) + rf = ResponseFuture(session, message, query) rf.send_request() rf.add_callbacks( @@ -443,7 +443,7 @@ def test_add_callbacks(self): self.assertRaises(Exception, rf.result) # test callback - rf = ResponseFuture(session, message, query, 1) + rf = ResponseFuture(session, message, query) rf.send_request() callback = Mock() From 0dcfa5e24fc3b6b0b95f71e4c13adaed01e50f67 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 15 May 2015 15:34:25 -0500 Subject: [PATCH 1426/3726] Change Cassandra 3.0 references to Cassandra 2.2 --- cassandra/cluster.py | 4 ++-- cassandra/metadata.py | 12 ++++++------ cassandra/query.py | 8 ++++---- tests/integration/__init__.py | 2 +- tests/integration/standard/test_query.py | 16 ++++++++-------- 5 files changed, 21 insertions(+), 21 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 9dee46150b..fe7e3372c4 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2212,7 +2212,7 @@ def _handle_results(success, result): else: raise types_result - # functions were introduced in Cassandra 3.0 + # functions were introduced in Cassandra 2.2 if functions_success: functions_result = dict_factory(*functions_result.results) if functions_result.results else {} else: @@ -2222,7 +2222,7 @@ def _handle_results(success, result): else: raise functions_result - # aggregates were introduced in Cassandra 3.0 + # aggregates were introduced in Cassandra 2.2 if aggregates_success: aggregates_result = dict_factory(*aggregates_result.results) if aggregates_result.results else {} else: diff --git a/cassandra/metadata.py b/cassandra/metadata.py index fe05304fd3..0d872d5b93 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -800,14 +800,14 @@ class KeyspaceMetadata(object): """ A map from user-defined function signatures to instances of :class:`~cassandra.metadata.Function`. - .. versionadded:: 3.0.0 + .. 
versionadded:: 2.6.0 """ aggregates = None """ A map from user-defined aggregate signatures to instances of :class:`~cassandra.metadata.Aggregate`. - .. versionadded:: 3.0.0 + .. versionadded:: 2.6.0 """ def __init__(self, name, durable_writes, strategy_class, strategy_options): self.name = name @@ -935,9 +935,9 @@ class Aggregate(object): """ A user defined aggregate function, as created by ``CREATE AGGREGATE`` statements. - Aggregate functions were introduced in Cassandra 3.0 + Aggregate functions were introduced in Cassandra 2.2 - .. versionadded:: 3.0.0 + .. versionadded:: 2.6.0 """ keyspace = None @@ -1024,9 +1024,9 @@ class Function(object): """ A user defined function, as created by ``CREATE FUNCTION`` statements. - User-defined functions were introduced in Cassandra 3.0 + User-defined functions were introduced in Cassandra 2.2 - .. versionadded:: 3.0.0 + .. versionadded:: 2.6.0 """ keyspace = None diff --git a/cassandra/query.py b/cassandra/query.py index f5c46c53e2..058ba14494 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -209,7 +209,7 @@ class Statement(object): These are only allowed when using protocol version 4 or higher. - .. versionadded:: 3.0.0 + .. versionadded:: 2.6.0 """ _serial_consistency_level = None @@ -666,7 +666,7 @@ def __init__(self, batch_type=BatchType.LOGGED, retry_policy=None, .. versionchanged:: 2.1.0 Added `serial_consistency_level` as a parameter - .. versionchanged:: 3.0.0 + .. 
versionchanged:: 2.6.0 Added `custom_payload` as a parameter """ self.batch_type = batch_type @@ -801,7 +801,7 @@ class QueryTrace(object): """ The IP address of the client that issued this request - This is only available when using Cassandra 3.0+ + This is only available when using Cassandra 2.2+ """ coordinator = None @@ -872,7 +872,7 @@ def populate(self, max_wait=2.0): self.started_at = session_row.started_at self.coordinator = session_row.coordinator self.parameters = session_row.parameters - # since C* 3.0 + # since C* 2.2 self.client = getattr(session_row, 'client', None) log.debug("Attempting to fetch trace events for trace ID: %s", self.trace_id) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 18a99c04de..7f0cd3bd9d 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -119,7 +119,7 @@ def _tuple_version(version_string): log.info('Using Cassandra version: %s', CASSANDRA_VERSION) CCM_KWARGS['version'] = CASSANDRA_VERSION -if CASSANDRA_VERSION >= '3.0': +if CASSANDRA_VERSION >= '2.2': default_protocol_version = 4 elif CASSANDRA_VERSION >= '2.1': default_protocol_version = 3 diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 755e897a41..b5d07f4487 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -108,21 +108,21 @@ def test_client_ip_in_trace(self): Test to validate that client trace contains client ip information. creates a simple query and ensures that the client trace information is present. This will - only be the case if the c* version is 3.0 or greater + only be the case if the c* version is 2.2 or greater - @since 3.0 + @since 2.6.0 @jira_ticket PYTHON-235 - @expected_result client address should be present in C* >= 3, otherwise should be none. + @expected_result client address should be present in C* >= 2.2, otherwise should be none. 
@test_category tracing + """ - #The current version on the trunk doesn't have the version set to 3.0 yet. + #The current version on the trunk doesn't have the version set to 2.2 yet. #For now we will use the protocol version. Once they update the version on C* trunk #we can use the C*. See below #self._cass_version, self._cql_version = get_server_versions() - #if self._cass_version < (3, 0): - # raise unittest.SkipTest("Client IP was not present in trace until C* 3.0") + #if self._cass_version < (2, 2): + # raise unittest.SkipTest("Client IP was not present in trace until C* 2.2") if PROTOCOL_VERSION < 4: raise unittest.SkipTest( "Protocol 4+ is required for client ip tracing, currently testing against %r" @@ -139,8 +139,8 @@ def test_client_ip_in_trace(self): # Fetch the client_ip from the trace. trace = response_future.get_query_trace(2.0) client_ip = trace.client - # Ensure that ip is set for c* >3 - self.assertIsNotNone(client_ip,"Client IP was not set in trace with C* > 3.0") + # Ensure that ip is set + self.assertIsNotNone(client_ip,"Client IP was not set in trace with C* >= 2.2") self.assertEqual(client_ip,current_host,"Client IP from trace did not match the expected value") cluster.shutdown() From 0646d15fa1c5cff5247750edcf2935ab62242dec Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 15 May 2015 16:25:52 -0500 Subject: [PATCH 1427/3726] Remove is_deterministic from function meta It was removed in the final version of the server changes. 
Also skipping legacy table test for 2.2, which does not have cassandra-cli --- cassandra/metadata.py | 17 ++++------------ tests/integration/__init__.py | 3 ++- tests/integration/standard/test_metadata.py | 22 +++++++-------------- 3 files changed, 13 insertions(+), 29 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 0d872d5b93..ece39cec16 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -250,7 +250,7 @@ def _build_function(self, keyspace, function_row): return Function(function_row['keyspace_name'], function_row['function_name'], function_row['signature'], function_row['argument_names'], return_type, function_row['language'], function_row['body'], - function_row['is_deterministic'], function_row['called_on_null_input']) + function_row['called_on_null_input']) def _build_aggregate(self, keyspace, aggregate_row): state_type = types.lookup_casstype(aggregate_row['state_type']) @@ -977,8 +977,7 @@ class Aggregate(object): state_type = None """ - Flag indicating whether this function is deterministic - (required for functional indexes) + Type of the aggregate state """ def __init__(self, keyspace, name, type_signature, state_func, @@ -1064,12 +1063,6 @@ class Function(object): Function body string """ - is_deterministic = None - """ - Flag indicating whether this function is deterministic - (required for functional indexes) - """ - called_on_null_input = None """ Flag indicating whether this function should be called for rows with null values @@ -1077,7 +1070,7 @@ class Function(object): """ def __init__(self, keyspace, name, type_signature, argument_names, - return_type, language, body, is_deterministic, called_on_null_input): + return_type, language, body, called_on_null_input): self.keyspace = keyspace self.name = name self.type_signature = type_signature @@ -1085,7 +1078,6 @@ def __init__(self, keyspace, name, type_signature, argument_names, self.return_type = return_type self.language = language self.body = body - 
self.is_deterministic = is_deterministic self.called_on_null_input = called_on_null_input def as_cql_query(self, formatted=False): @@ -1099,13 +1091,12 @@ def as_cql_query(self, formatted=False): name = protect_name(self.name) arg_list = ', '.join(["%s %s" % (protect_name(n), t) for n, t in zip(self.argument_names, self.type_signature)]) - determ = '' if self.is_deterministic else 'NON DETERMINISTIC ' typ = self.return_type.cql_parameterized_type() lang = self.language body = protect_value(self.body) on_null = "CALLED" if self.called_on_null_input else "RETURNS NULL" - return "CREATE %(determ)sFUNCTION %(keyspace)s.%(name)s(%(arg_list)s)%(sep)s" \ + return "CREATE FUNCTION %(keyspace)s.%(name)s(%(arg_list)s)%(sep)s" \ "%(on_null)s ON NULL INPUT%(sep)s" \ "RETURNS %(typ)s%(sep)s" \ "LANGUAGE %(lang)s%(sep)s" \ diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 7f0cd3bd9d..ddce8de86f 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -197,7 +197,8 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True): except Exception: log.debug("Creating new ccm %s cluster with %s", cluster_name, CCM_KWARGS) cluster = CCMCluster(path, cluster_name, **CCM_KWARGS) - cluster.set_configuration_options({'start_native_transport': True}) + cluster.set_configuration_options({'start_native_transport': True, + 'enable_user_defined_functions': True}) common.switch_cluster(path, cluster_name) cluster.populate(nodes, ipformat=ipformat) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 003c53a955..3f3ef71092 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -568,9 +568,14 @@ def test_token_map(self): def test_legacy_tables(self): - if get_server_versions()[0] < (2, 1, 0): + cass_ver = get_server_versions()[0] + print cass_ver + if cass_ver < (2, 1, 0): raise unittest.SkipTest('Test schema output assumes 
2.1.0+ options') + if cass_ver >= (2, 2, 0): + raise unittest.SkipTest('Cannot test cli script on Cassandra 2.2.0+') + if sys.version_info[0:2] != (2, 7): raise unittest.SkipTest('This test compares static strings generated from dict items, which may change orders. Test with 2.7.') @@ -1075,7 +1080,7 @@ def __init__(self, test_case, **kwargs): class FunctionMetadata(FunctionTest): - def make_function_kwargs(self, deterministic=True, called_on_null=True): + def make_function_kwargs(self, called_on_null=True): return {'keyspace': self.keyspace_name, 'name': self.function_name, 'type_signature': ['double', 'int'], @@ -1083,7 +1088,6 @@ def make_function_kwargs(self, deterministic=True, called_on_null=True): 'return_type': DoubleType, 'language': 'java', 'body': 'return new Double(0.0);', - 'is_deterministic': deterministic, 'called_on_null_input': called_on_null} def test_functions_after_udt(self): @@ -1133,18 +1137,6 @@ def test_functions_follow_keyspace_alter(self): finally: self.session.execute('ALTER KEYSPACE %s WITH durable_writes = true' % self.keyspace_name) - def test_function_cql_determinism(self): - kwargs = self.make_function_kwargs() - kwargs['is_deterministic'] = True - with self.VerifiedFunction(self, **kwargs) as vf: - fn_meta = self.keyspace_function_meta[vf.signature] - self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*") - - kwargs['is_deterministic'] = False - with self.VerifiedFunction(self, **kwargs) as vf: - fn_meta = self.keyspace_function_meta[vf.signature] - self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE NON DETERMINISTIC FUNCTION.*") - def test_function_cql_called_on_null(self): kwargs = self.make_function_kwargs() kwargs['called_on_null_input'] = True From ebfa77c19d98c5a5a90a31db493794aee295af45 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 15 May 2015 16:47:20 -0500 Subject: [PATCH 1428/3726] refresh_type_metadata -> refresh_user_type_metadata code review input --- cassandra/cluster.py | 2 +- 
docs/api/cassandra/cluster.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c48a1ebffd..d9be588b6d 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1202,7 +1202,7 @@ def refresh_table_metadata(self, keyspace, table, max_schema_agreement_wait=None if not self.control_connection.refresh_schema(keyspace, table, schema_agreement_wait=max_schema_agreement_wait): raise Exception("Table metadata was not refreshed. See log for details.") - def refresh_type_metadata(self, keyspace, user_type, max_schema_agreement_wait=None): + def refresh_user_type_metadata(self, keyspace, user_type, max_schema_agreement_wait=None): """ Synchronously refresh user defined type metadata. diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 886d07160e..2174a8169f 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -71,7 +71,7 @@ .. automethod:: refresh_table_metadata - .. automethod:: refresh_type_metadata + .. automethod:: refresh_user_type_metadata .. 
automethod:: refresh_schema From f05921a583a84c0c749f01f5a596c8240315cab4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 15 May 2015 16:49:37 -0500 Subject: [PATCH 1429/3726] cqle: Make CQL UPPER in management.drop_table code review input --- cassandra/cqlengine/management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index d84406f959..b5d2cb4620 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -523,7 +523,7 @@ def drop_table(model): try: meta.keyspaces[ks_name].tables[raw_cf_name] - execute('drop table {};'.format(model.column_family_name())) + execute('DROP TABLE {};'.format(model.column_family_name())) except KeyError: pass From ffaaddcf133fb770528611670bca4fd13350feb9 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Fri, 15 May 2015 16:16:07 -0700 Subject: [PATCH 1430/3726] Unbreak tests by disabling UDFs in cassandra.yaml --- tests/integration/__init__.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index ddce8de86f..5c09af3bc9 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -197,8 +197,9 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True): except Exception: log.debug("Creating new ccm %s cluster with %s", cluster_name, CCM_KWARGS) cluster = CCMCluster(path, cluster_name, **CCM_KWARGS) - cluster.set_configuration_options({'start_native_transport': True, - 'enable_user_defined_functions': True}) + cluster.set_configuration_options({'start_native_transport': True}) + if CASSANDRA_VERSION >= '2.2': + cluster.set_configuration_options({'enable_user_defined_functions': True}) common.switch_cluster(path, cluster_name) cluster.populate(nodes, ipformat=ipformat) From 830958952b40e2ae972e5fecde3c0358d905fc00 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Fri, 15 May 2015 
16:51:03 -0700 Subject: [PATCH 1431/3726] temporarily synchronously refresh the schema metadata, for IndexMapTests --- tests/integration/standard/test_metadata.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 3f3ef71092..fea92f2fb4 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -981,6 +981,10 @@ def test_index_updates(self): # both indexes updated when index dropped self.session.execute("DROP INDEX a_idx") + + # temporarily synchronously refresh the schema metadata, until CASSANDRA-9391 is merged in + self.cluster.refresh_schema(self.keyspace_name, self.table_name) + ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name] table_meta = ks_meta.tables[self.table_name] self.assertNotIn('a_idx', ks_meta.indexes) From dcb41c3c9a18eb7aa6eafd5f59f9083fdcf1e9b2 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Fri, 15 May 2015 17:18:33 -0700 Subject: [PATCH 1432/3726] Unbreak UDF metadata tests, run only if protocol v 4+ --- tests/integration/standard/test_metadata.py | 87 ++++++++++++--------- 1 file changed, 48 insertions(+), 39 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index fea92f2fb4..612593f597 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1022,27 +1022,35 @@ class FunctionTest(unittest.TestCase): """ Base functionality for Function and Aggregate metadata test classes """ + + def setUp(self): + """ + Tests are skipped if run with native protocol version < 4 + """ + + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest("Function metadata requires native protocol version 4+") + @property def function_name(self): return self._testMethodName.lower() @classmethod def setup_class(cls): - if PROTOCOL_VERSION < 4: - raise unittest.SkipTest("Function metadata 
requires native protocol version 4+") - - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - cls.keyspace_name = cls.__name__.lower() - cls.session = cls.cluster.connect() - cls.session.execute("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name) - cls.session.set_keyspace(cls.keyspace_name) - cls.keyspace_function_meta = cls.cluster.metadata.keyspaces[cls.keyspace_name].functions - cls.keyspace_aggregate_meta = cls.cluster.metadata.keyspaces[cls.keyspace_name].aggregates + if PROTOCOL_VERSION >= 4: + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.keyspace_name = cls.__name__.lower() + cls.session = cls.cluster.connect() + cls.session.execute("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name) + cls.session.set_keyspace(cls.keyspace_name) + cls.keyspace_function_meta = cls.cluster.metadata.keyspaces[cls.keyspace_name].functions + cls.keyspace_aggregate_meta = cls.cluster.metadata.keyspaces[cls.keyspace_name].aggregates @classmethod def teardown_class(cls): - cls.session.execute("DROP KEYSPACE IF EXISTS %s" % cls.keyspace_name) - cls.cluster.shutdown() + if PROTOCOL_VERSION >= 4: + cls.session.execute("DROP KEYSPACE IF EXISTS %s" % cls.keyspace_name) + cls.cluster.shutdown() class Verified(object): @@ -1158,33 +1166,34 @@ class AggregateMetadata(FunctionTest): @classmethod def setup_class(cls): - super(AggregateMetadata, cls).setup_class() - - cls.session.execute("""CREATE OR REPLACE FUNCTION sum_int(s int, i int) - RETURNS NULL ON NULL INPUT - RETURNS int - LANGUAGE javascript AS 's + i';""") - cls.session.execute("""CREATE OR REPLACE FUNCTION sum_int_two(s int, i int, j int) - RETURNS NULL ON NULL INPUT - RETURNS int - LANGUAGE javascript AS 's + i + j';""") - cls.session.execute("""CREATE OR REPLACE FUNCTION "List_As_String"(l list) - RETURNS NULL ON NULL INPUT - RETURNS int - LANGUAGE 
javascript AS ''''' + l';""") - cls.session.execute("""CREATE OR REPLACE FUNCTION extend_list(s list, i int) - CALLED ON NULL INPUT - RETURNS list - LANGUAGE java AS 'if (i != null) s.add(i.toString()); return s;';""") - cls.session.execute("""CREATE OR REPLACE FUNCTION update_map(s map, i int) - RETURNS NULL ON NULL INPUT - RETURNS map - LANGUAGE java AS 's.put(new Integer(i), new Integer(i)); return s;';""") - cls.session.execute("""CREATE TABLE IF NOT EXISTS t - (k int PRIMARY KEY, v int)""") - for x in range(4): - cls.session.execute("INSERT INTO t (k,v) VALUES (%s, %s)", (x, x)) - cls.session.execute("INSERT INTO t (k) VALUES (%s)", (4,)) + if PROTOCOL_VERSION >= 4: + super(AggregateMetadata, cls).setup_class() + + cls.session.execute("""CREATE OR REPLACE FUNCTION sum_int(s int, i int) + RETURNS NULL ON NULL INPUT + RETURNS int + LANGUAGE javascript AS 's + i';""") + cls.session.execute("""CREATE OR REPLACE FUNCTION sum_int_two(s int, i int, j int) + RETURNS NULL ON NULL INPUT + RETURNS int + LANGUAGE javascript AS 's + i + j';""") + cls.session.execute("""CREATE OR REPLACE FUNCTION "List_As_String"(l list) + RETURNS NULL ON NULL INPUT + RETURNS int + LANGUAGE javascript AS ''''' + l';""") + cls.session.execute("""CREATE OR REPLACE FUNCTION extend_list(s list, i int) + CALLED ON NULL INPUT + RETURNS list + LANGUAGE java AS 'if (i != null) s.add(i.toString()); return s;';""") + cls.session.execute("""CREATE OR REPLACE FUNCTION update_map(s map, i int) + RETURNS NULL ON NULL INPUT + RETURNS map + LANGUAGE java AS 's.put(new Integer(i), new Integer(i)); return s;';""") + cls.session.execute("""CREATE TABLE IF NOT EXISTS t + (k int PRIMARY KEY, v int)""") + for x in range(4): + cls.session.execute("INSERT INTO t (k,v) VALUES (%s, %s)", (x, x)) + cls.session.execute("INSERT INTO t (k) VALUES (%s)", (4,)) def make_aggregate_kwargs(self, state_func, state_type, final_func=None, init_cond=None): return {'keyspace': self.keyspace_name, From 
b004dc6ff0669d12806cca53b4db38cf27c2ffc2 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Fri, 15 May 2015 17:43:02 -0700 Subject: [PATCH 1433/3726] remove extra printline in TestCodeCoverage --- tests/integration/standard/test_metadata.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 612593f597..73ffe134ce 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -569,7 +569,6 @@ def test_token_map(self): def test_legacy_tables(self): cass_ver = get_server_versions()[0] - print cass_ver if cass_ver < (2, 1, 0): raise unittest.SkipTest('Test schema output assumes 2.1.0+ options') From 9b940d25933a5bb4fd633ad388a77bf28b3e1044 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 18 May 2015 08:22:58 -0500 Subject: [PATCH 1434/3726] Correct expected values for Small|TinyInt unit tests --- tests/unit/test_marshalling.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index eeefb0f174..907cd765b6 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -83,9 +83,9 @@ (b'\x7f\xff\xff\xff', 'SimpleDateType', Date('1969-12-31')), (b'\x00\x00\x00\x00\x00\x00\x00\x01', 'TimeType', Time(1)), (b'\x7f', 'TinyIntType', 127), - (b'\xff\xff\xff\x80', 'TinyIntType', -128), - (b'\xff\xff\x80\x00', 'SmallIntType', 32767), - (b'\xff\xff\x80\x00', 'SmallIntType', -32768) + (b'\x80', 'TinyIntType', -128), + (b'\x7f\xff', 'SmallIntType', 32767), + (b'\x80\x00', 'SmallIntType', -32768) ) ordered_map_value = OrderedMapSerializedKey(UTF8Type, 2) From 65d14ddb7b9c119feae59c760a43fa5b459c48ea Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 12 May 2015 00:29:49 -0500 Subject: [PATCH 1435/3726] Added tests for custom payloads. Fixed formatting in tracing tests. 
--- tests/integration/__init__.py | 4 +- tests/integration/long/test_custom_payload.py | 201 ++++++++++++++++++ tests/integration/standard/test_query.py | 39 ++-- tox.ini | 2 +- 4 files changed, 223 insertions(+), 23 deletions(-) create mode 100644 tests/integration/long/test_custom_payload.py diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 3ea397f01e..4c3a3ab0a1 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -203,7 +203,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True): if start: log.debug("Starting ccm %s cluster", cluster_name) cluster.start(wait_for_binary_proto=True, wait_other_notice=True) - setup_test_keyspace() + setup_keyspace() CCM_CLUSTER = cluster except Exception: @@ -230,7 +230,7 @@ def teardown_package(): log.warn('Did not find cluster: %s' % cluster_name) -def setup_test_keyspace(): +def setup_keyspace(): # wait for nodes to startup time.sleep(10) diff --git a/tests/integration/long/test_custom_payload.py b/tests/integration/long/test_custom_payload.py new file mode 100644 index 0000000000..f131a5e756 --- /dev/null +++ b/tests/integration/long/test_custom_payload.py @@ -0,0 +1,201 @@ +# Copyright 2013-2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +try: + import unittest2 as unittest +except ImportError: + import unittest + +from cassandra.query import (SimpleStatement, BatchStatement, BatchType) +from cassandra.cluster import Cluster + +from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace + + +def setup_module(): + """ + We need some custom setup for this module. All unit tests in this module + require protocol >=4. We won't bother going through the setup required unless that is the + protocol version we are using. + """ + + # If we aren't at protocol v 4 or greater don't waste time setting anything up, all tests will be skipped + if PROTOCOL_VERSION >= 4: + # Don't start the ccm cluster until we get the custom jvm argument specified + use_singledc(start=False) + ccm_cluster = get_cluster() + # if needed stop CCM cluster + ccm_cluster.stop() + # This will enable the Mirroring query handler which will echo our custom payload k,v pairs back to us + jmv_args = [ + " -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler"] + ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=jmv_args) + # wait for nodes to startup + setup_keyspace() + + +def teardown_module(): + """ + The rests of the tests don't need our custom payload query handle so stop the cluster so we + don't impact other tests + """ + + ccm_cluster = get_cluster() + if ccm_cluster is not None: + ccm_cluster.stop() + + +class CustomPayloadTests(unittest.TestCase): + + def setUp(self): + """ + Test is skipped if run with cql version <4 + """ + + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest( + "Native protocol 4,0+ is required for custom payloads, currently using %r" + % (PROTOCOL_VERSION,)) + + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + + def tearDown(self): + + self.cluster.shutdown() + + def test_custom_query_basic(self): + """ + Test to validate that custom payloads work with 
simple queries + + creates a simple query and ensures that custom payloads are passed to C*. A custom + query provider is used with C* so we can validate that same custom payloads are sent back + with the results + + + @since 2.6 + @jira_ticket PYTHON-280 + @expected_result valid custom payloads should be sent and received + + @test_category queries:custom_payload + """ + + # Create a simple query statement a + query = "SELECT * FROM system.local" + statement = SimpleStatement(query) + # Validate that various types of custom payloads are sent and received okay + self.validate_various_custom_payloads(statement=statement) + + def test_custom_query_batching(self): + """ + Test to validate that custom payloads work with batch queries + + creates a batch query and ensures that custom payloads are passed to C*. A custom + query provider is used with C* so we can validate that same custom payloads are sent back + with the results + + + @since 2.6 + @jira_ticket PYTHON-280 + @expected_result valid custom payloads should be sent and received + + @test_category queries:custom_payload + """ + + # Construct Batch Statement + batch = BatchStatement(BatchType.LOGGED) + for i in range(10): + batch.add(SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (%s, %s)"), (i, i)) + + # Validate that various types of custom payloads are sent and received okay + self.validate_various_custom_payloads(statement=batch) + + def test_custom_query_prepared(self): + """ + Test to validate that custom payloads work with prepared queries + + creates a batch query and ensures that custom payloads are passed to C*. 
A custom + query provider is used with C* so we can validate that same custom payloads are sent back + with the results + + + @since 2.6 + @jira_ticket PYTHON-280 + @expected_result valid custom payloads should be sent and received + + @test_category queries:custom_payload + """ + + # Construct prepared statement + prepared = self.session.prepare( + """ + INSERT INTO test3rf.test (k, v) VALUES (?, ?) + """) + + bound = prepared.bind((1, None)) + + # Validate that various custom payloads are validated correctly + self.validate_various_custom_payloads(statement=bound) + + def validate_various_custom_payloads(self, statement): + """ + This is a utility method that given a statement will attempt + to submit the statement with various custom payloads. It will + validate that the custom payloads are sent and received correctly. + + @param statement The statement to validate the custom queries in conjunction with + """ + + # Simple key value + custom_payload = {'test': 'test_return'} + self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload) + + # no key value + custom_payload = {'': ''} + self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload) + + # Space value + custom_payload = {' ': ' '} + self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload) + + # Long key value pair + key_value = "x" * 10000 + custom_payload = {key_value: key_value} + self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload) + + # Max supported value key pairs according C* binary protocol v4 should be 65534 (unsigned short max value) + for i in range(65534): + custom_payload[str(i)] = str(i) + self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload) + + # Add one custom payload to this is too many key value pairs and should fail + custom_payload[str(65535)] = str(65535) + with self.assertRaises(ValueError): + 
self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload) + + def execute_async_validate_custom_payload(self, statement, custom_payload): + """ + This is just a simple method that submits a statement with a payload, and validates + that the custom payload we submitted matches the one that we got back + @param statement The statement to execute + @param custom_payload The custom payload to submit with + """ + + # Submit the statement with our custom payload. Validate the one + # we receive from the server matches + response_future = self.session.execute_async(statement, custom_payload=custom_payload) + response_future.result(timeout=10.0) + returned_custom_payload = response_future.custom_payload + self.assertEqual(custom_payload, returned_custom_payload) \ No newline at end of file diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 7f84f9ab31..6b139c03f1 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -26,7 +26,7 @@ from cassandra.cluster import Cluster from cassandra.policies import HostDistance -from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions +from tests.integration import use_singledc, PROTOCOL_VERSION def setup_module(): @@ -93,40 +93,38 @@ def test_client_ip_in_trace(self): Test to validate that client trace contains client ip information. creates a simple query and ensures that the client trace information is present. This will - only be the case if the c* version is 3.0 or greater + only be the case if the c* version is 2.2 or greater - - @since 3.0 + @since 2.2 @jira_ticket PYTHON-235 - @expected_result client address should be present in C* >= 3, otherwise should be none. + @expected_result client address should be present in C* >= 2.2, otherwise should be none. @test_category tracing -+ """ - #The current version on the trunk doesn't have the version set to 3.0 yet. 
- #For now we will use the protocol version. Once they update the version on C* trunk - #we can use the C*. See below - #self._cass_version, self._cql_version = get_server_versions() - #if self._cass_version < (3, 0): - # raise unittest.SkipTest("Client IP was not present in trace until C* 3.0") + """ + if PROTOCOL_VERSION < 4: - raise unittest.SkipTest( - "Protocol 4+ is required for client ip tracing, currently testing against %r" - % (PROTOCOL_VERSION,)) + raise unittest.SkipTest( + "Protocol 4+ is required for client ip tracing, currently testing against %r" + % (PROTOCOL_VERSION,)) cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() + # Make simple query with trace enabled query = "SELECT * FROM system.local" statement = SimpleStatement(query) response_future = session.execute_async(statement, trace=True) - response_future.result(10.0) + response_future.result(timeout=10.0) current_host = response_future._current_host.address + # Fetch the client_ip from the trace. 
- trace = response_future.get_query_trace(2.0) + trace = response_future.get_query_trace(max_wait=2.0) client_ip = trace.client - # Ensure that ip is set for c* >3 - self.assertIsNotNone(client_ip,"Client IP was not set in trace with C* > 3.0") - self.assertEqual(client_ip,current_host,"Client IP from trace did not match the expected value") + + # Ensure that ip is set for c* >2.2 + self.assertIsNotNone(client_ip, "Client IP was not set in trace with C* > 2.2") + self.assertEqual(client_ip, current_host, "Client IP from trace did not match the expected value") + cluster.shutdown() @@ -547,3 +545,4 @@ def test_inherit_first_rk_prepared_param(self): self.assertIsNotNone(batch.routing_key) self.assertEqual(batch.routing_key, self.prepared.bind((1, 0)).routing_key) + diff --git a/tox.ini b/tox.ini index 6c4aa31a27..d694e7ab03 100644 --- a/tox.ini +++ b/tox.ini @@ -10,8 +10,8 @@ deps = nose [testenv] deps = {[base]deps} + sure==1.2.3 blist - sure setenv = USE_CASS_EXTERNAL=1 commands = {envpython} setup.py build_ext --inplace nosetests --verbosity=2 tests/unit/ From 8d68dc2c905232a1bf7ab0aff9eb05e6d287735c Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 18 May 2015 09:30:43 -0500 Subject: [PATCH 1436/3726] Documentation fixes --- tests/integration/standard/test_query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 6b139c03f1..cecb07dd51 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -95,7 +95,7 @@ def test_client_ip_in_trace(self): creates a simple query and ensures that the client trace information is present. This will only be the case if the c* version is 2.2 or greater - @since 2.2 + @since 2.6 @jira_ticket PYTHON-235 @expected_result client address should be present in C* >= 2.2, otherwise should be none. 
From 16f377cbfc21dda65698ecd942db437458db4d1a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 18 May 2015 10:06:56 -0500 Subject: [PATCH 1437/3726] Handle server warnings in protocol v4+ PYTHON-315 --- cassandra/cluster.py | 23 +++++++++++++++++++++++ cassandra/protocol.py | 14 ++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index fe7e3372c4..3bcbe495e9 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1438,6 +1438,8 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_ future = self.execute_async(query, parameters, trace, custom_payload) try: result = future.result(timeout) + if future.warnings: + log.warning("Query returned warnings from the server. See cassandra.protocol log for details.") finally: if trace: try: @@ -2669,6 +2671,7 @@ class ResponseFuture(object): _metrics = None _paging_state = None _custom_payload = None + _warnings = None def __init__(self, session, message, query, default_timeout=None, metrics=None, prepared_statement=None): self.session = session @@ -2756,6 +2759,24 @@ def has_more_pages(self): """ return self._paging_state is not None + @property + def warnings(self): + """ + Warnings returned from the server, if any. This will only be + set for protocol_version 4+. + + Warnings may be returned for such things as oversized batches, + or too many tombstones in slice queries. + + Ensure the future is complete before trying to access this property + (call :meth:`.result()`, or after callback is invoked). + Otherwise it may throw if the response has not been received. + """ + # TODO: When timers are introduced, just make this wait + if not self._event.is_set(): + raise Exception("warnings cannot be retrieved before ResponseFuture is finalized") + return self._warnings + @property def custom_payload(self): """ @@ -2769,6 +2790,7 @@ def custom_payload(self): :return: :ref:`custom_payload`. 
""" + # TODO: When timers are introduced, just make this wait if not self._event.is_set(): raise Exception("custom_payload cannot be retrieved before ResponseFuture is finalized") return self._custom_payload @@ -2811,6 +2833,7 @@ def _set_result(self, response): self.query.trace_id = trace_id self._query_trace = QueryTrace(trace_id, self.session) + self._warnings = getattr(response, 'warnings', None) self._custom_payload = getattr(response, 'custom_payload', None) if isinstance(response, ResultMessage): diff --git a/cassandra/protocol.py b/cassandra/protocol.py index d88db589e4..f7c7176f69 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -57,6 +57,7 @@ class InternalError(Exception): COMPRESSED_FLAG = 0x01 TRACING_FLAG = 0x02 CUSTOM_PAYLOAD_FLAG = 0x04 +WARNING_FLAG = 0x08 _message_types_by_name = {} _message_types_by_opcode = {} @@ -74,6 +75,7 @@ class _MessageType(object): tracing = False custom_payload = None + warnings = None def to_binary(self, stream_id, protocol_version, compression=None): flags = 0 @@ -133,6 +135,12 @@ def decode_response(protocol_version, user_type_map, stream_id, flags, opcode, b else: trace_id = None + if flags & WARNING_FLAG: + warnings = read_stringlist(body) + flags ^= WARNING_FLAG + else: + warnings = None + if flags & CUSTOM_PAYLOAD_FLAG: custom_payload = read_bytesmap(body) flags ^= CUSTOM_PAYLOAD_FLAG @@ -147,6 +155,12 @@ def decode_response(protocol_version, user_type_map, stream_id, flags, opcode, b msg.stream_id = stream_id msg.trace_id = trace_id msg.custom_payload = custom_payload + msg.warnings = warnings + + if msg.warnings: + for w in msg.warnings: + log.warning("Server warning: %s", w) + return msg From c8235aade9e75931ba2b0e1e6850df62caf4e5d8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 18 May 2015 16:54:08 -0500 Subject: [PATCH 1438/3726] Remove warn log from Session.execute logging remains in protocol --- cassandra/cluster.py | 2 -- 1 file changed, 2 deletions(-) diff --git 
a/cassandra/cluster.py b/cassandra/cluster.py index 3bcbe495e9..7d078392eb 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1438,8 +1438,6 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_ future = self.execute_async(query, parameters, trace, custom_payload) try: result = future.result(timeout) - if future.warnings: - log.warning("Query returned warnings from the server. See cassandra.protocol log for details.") finally: if trace: try: From bc947d0d60b69410d4ed48175c83ec0166d44f76 Mon Sep 17 00:00:00 2001 From: Stefania Alborghetti Date: Tue, 19 May 2015 11:38:42 +0800 Subject: [PATCH 1439/3726] Fixed DESCRIBE names for small and tiny ints, added column names --- cassandra/cqlengine/columns.py | 16 +++++++++++++++- cassandra/cqltypes.py | 4 ++-- cassandra/protocol.py | 6 +++--- 3 files changed, 20 insertions(+), 6 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index ae17779590..48a8806c7b 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -377,9 +377,23 @@ def to_database(self, value): return self.validate(value) +class TinyInt(Integer): + """ + Stores an 8-bit signed integer value + """ + db_type = 'tinyint' + + +class SmallInt(Integer): + """ + Stores a 16-bit signed integer value + """ + db_type = 'smallint' + + class BigInt(Integer): """ - Stores a 64-bit signed long value + Stores a 64-bit signed integer value """ db_type = 'bigint' diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 48cbc198c1..6e9306c7cf 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -424,7 +424,7 @@ def deserialize(byts, protocol_version): def serialize(truth, protocol_version): return int8_pack(truth) -class TinyIntType(_CassandraType): +class ByteType(_CassandraType): typename = 'tinyint' @staticmethod @@ -661,7 +661,7 @@ def deserialize(byts, protocol_version): return util.Date(days) -class SmallIntType(_CassandraType): +class 
ShortType(_CassandraType): typename = 'smallint' @staticmethod diff --git a/cassandra/protocol.py b/cassandra/protocol.py index f7c7176f69..9aab01d298 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -36,7 +36,7 @@ LongType, MapType, SetType, TimeUUIDType, UTF8Type, UUIDType, UserType, TupleType, lookup_casstype, SimpleDateType, - TimeType, TinyIntType, SmallIntType) + TimeType, ByteType, ShortType) from cassandra.policies import WriteType log = logging.getLogger(__name__) @@ -623,8 +623,8 @@ class ResultMessage(_MessageType): 0x0010: InetAddressType, 0x0011: SimpleDateType, 0x0012: TimeType, - 0x0013: SmallIntType, - 0x0014: TinyIntType, + 0x0013: ShortType, + 0x0014: ByteType, 0x0020: ListType, 0x0021: MapType, 0x0022: SetType, From 9b04537f969ce822a3c2ba099f4bbf12e23d72e6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 19 May 2015 08:52:35 -0500 Subject: [PATCH 1440/3726] Update unit tests for corrected small/tiny int types --- tests/unit/test_marshalling.py | 8 ++++---- tests/unit/test_types.py | 10 +++++----- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index 907cd765b6..60ae33fd6b 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -82,10 +82,10 @@ (b'\x80\x00\x00\x01', 'SimpleDateType', Date(1)), (b'\x7f\xff\xff\xff', 'SimpleDateType', Date('1969-12-31')), (b'\x00\x00\x00\x00\x00\x00\x00\x01', 'TimeType', Time(1)), - (b'\x7f', 'TinyIntType', 127), - (b'\x80', 'TinyIntType', -128), - (b'\x7f\xff', 'SmallIntType', 32767), - (b'\x80\x00', 'SmallIntType', -32768) + (b'\x7f', 'ByteType', 127), + (b'\x80', 'ByteType', -128), + (b'\x7f\xff', 'ShortType', 32767), + (b'\x80\x00', 'ShortType', -32768) ) ordered_map_value = OrderedMapSerializedKey(UTF8Type, 2) diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 4386380359..77667705d0 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -26,7 
+26,7 @@ from cassandra.cqltypes import (BooleanType, lookup_casstype_simple, lookup_casstype, LongType, DecimalType, SetType, cql_typename, CassandraType, UTF8Type, parse_casstype_args, - SimpleDateType, TimeType, TinyIntType, SmallIntType, + SimpleDateType, TimeType, ByteType, ShortType, EmptyValue, _CassandraType, DateType, int64_pack) from cassandra.encoder import cql_quote from cassandra.protocol import (write_string, read_longstring, write_stringmap, @@ -55,8 +55,8 @@ def test_lookup_casstype_simple(self): self.assertEqual(lookup_casstype_simple('UTF8Type'), cassandra.cqltypes.UTF8Type) self.assertEqual(lookup_casstype_simple('DateType'), cassandra.cqltypes.DateType) self.assertEqual(lookup_casstype_simple('SimpleDateType'), cassandra.cqltypes.SimpleDateType) - self.assertEqual(lookup_casstype_simple('TinyIntType'), cassandra.cqltypes.TinyIntType) - self.assertEqual(lookup_casstype_simple('SmallIntType'), cassandra.cqltypes.SmallIntType) + self.assertEqual(lookup_casstype_simple('ByteType'), cassandra.cqltypes.ByteType) + self.assertEqual(lookup_casstype_simple('ShortType'), cassandra.cqltypes.ShortType) self.assertEqual(lookup_casstype_simple('TimeUUIDType'), cassandra.cqltypes.TimeUUIDType) self.assertEqual(lookup_casstype_simple('TimeType'), cassandra.cqltypes.TimeType) self.assertEqual(lookup_casstype_simple('UUIDType'), cassandra.cqltypes.UUIDType) @@ -89,8 +89,8 @@ def test_lookup_casstype(self): self.assertEqual(lookup_casstype('UTF8Type'), cassandra.cqltypes.UTF8Type) self.assertEqual(lookup_casstype('DateType'), cassandra.cqltypes.DateType) self.assertEqual(lookup_casstype('TimeType'), cassandra.cqltypes.TimeType) - self.assertEqual(lookup_casstype('TinyIntType'), cassandra.cqltypes.TinyIntType) - self.assertEqual(lookup_casstype('SmallIntType'), cassandra.cqltypes.SmallIntType) + self.assertEqual(lookup_casstype('ByteType'), cassandra.cqltypes.ByteType) + self.assertEqual(lookup_casstype('ShortType'), cassandra.cqltypes.ShortType) 
self.assertEqual(lookup_casstype('TimeUUIDType'), cassandra.cqltypes.TimeUUIDType) self.assertEqual(lookup_casstype('UUIDType'), cassandra.cqltypes.UUIDType) self.assertEqual(lookup_casstype('IntegerType'), cassandra.cqltypes.IntegerType) From c40c1da558e11f6ed4bd3854cdadf7f6cbd7a989 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Wed, 20 May 2015 11:50:17 -0700 Subject: [PATCH 1441/3726] [PYTHON-244] Update docs for test --- .../integration/cqlengine/model/test_model.py | 23 +++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index c37e088f54..0ac81c35e6 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -51,6 +51,23 @@ class EqualityModel1(Model): self.assertNotEqual(m0, m1) def test_keywords_as_names(self): + """ + Test for CQL keywords as names + + test_keywords_as_names tests that CQL keywords are properly and automatically quoted in cqlengine. It creates + a keyspace, keyspace, which should be automatically quoted to "keyspace" in CQL. It then creates a table, table, + which should also be automatically quoted to "table". It then verfies that operations can be done on the + "keyspace"."table" which has been created. It also verifies that table alternations work and operations can be + performed on the altered table. + + @since 2.6.0 + @jira_ticket PYTHON-244 + @expected_result Cqlengine should quote CQL keywords properly when creating keyspaces and tables. 
+ + @test_category schema:generation + """ + + # If the keyspace exists, it will not be re-created create_keyspace_simple('keyspace', 1) class table(Model): @@ -58,8 +75,10 @@ class table(Model): select = columns.Integer(primary_key=True) table = columns.Text() - # create should work + # In case the table already exists in keyspace drop_table(table) + + # Create should work sync_table(table) created = table.create(select=0, table='table') @@ -67,7 +86,7 @@ class table(Model): self.assertEqual(created.select, selected.select) self.assertEqual(created.table, selected.table) - # alter should work + # Alter should work class table(Model): __keyspace__ = 'keyspace' select = columns.Integer(primary_key=True) From 8b8993c810e26938e3da92cddca17ae6dfba7548 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 20 May 2015 14:19:10 -0500 Subject: [PATCH 1442/3726] Use a namedtuple for prepared column meta. Improve {maintain, read}ability --- cassandra/protocol.py | 4 +++- cassandra/query.py | 27 ++++++++++----------------- tests/unit/test_parameter_binding.py | 17 +++++++++-------- 3 files changed, 22 insertions(+), 26 deletions(-) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 9aab01d298..9fbec28520 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -13,6 +13,7 @@ # limitations under the License. 
from __future__ import absolute_import # to enable import io from stdlib +from collections import namedtuple import logging import socket from uuid import UUID @@ -49,6 +50,7 @@ class NotSupportedError(Exception): class InternalError(Exception): pass +ColumnMetadata = namedtuple("ColumnMetadata", ['keyspace_name', 'table_name', 'name', 'type']) HEADER_DIRECTION_FROM_CLIENT = 0x00 HEADER_DIRECTION_TO_CLIENT = 0x80 @@ -727,7 +729,7 @@ def recv_prepared_metadata(cls, f, protocol_version, user_type_map): colcfname = read_string(f) colname = read_string(f) coltype = cls.read_type(f, user_type_map) - column_metadata.append((colksname, colcfname, colname, coltype)) + column_metadata.append(ColumnMetadata(colksname, colcfname, colname, coltype)) return column_metadata, pk_indexes @classmethod diff --git a/cassandra/query.py b/cassandra/query.py index 058ba14494..55da7611ee 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -380,15 +380,15 @@ def from_message(cls, query_id, column_metadata, pk_indexes, cluster_metadata, q partition_key_columns = None routing_key_indexes = None - ks_name, table_name, _, _ = column_metadata[0] - ks_meta = cluster_metadata.keyspaces.get(ks_name) + first_col = column_metadata[0] + ks_meta = cluster_metadata.keyspaces.get(first_col.keyspace_name) if ks_meta: - table_meta = ks_meta.tables.get(table_name) + table_meta = ks_meta.tables.get(first_col.table_name) if table_meta: partition_key_columns = table_meta.partition_key # make a map of {column_name: index} for each column in the statement - statement_indexes = dict((c[2], i) for i, c in enumerate(column_metadata)) + statement_indexes = dict((c.name, i) for i, c in enumerate(column_metadata)) # a list of which indexes in the statement correspond to partition key items try: @@ -447,7 +447,7 @@ def __init__(self, prepared_statement, *args, **kwargs): meta = prepared_statement.column_metadata if meta: - self.keyspace = meta[0][0] + self.keyspace = meta[0].keyspace_name 
Statement.__init__(self, *args, **kwargs) @@ -472,18 +472,16 @@ def bind(self, values): # sort values accordingly for col in col_meta: try: - values.append(dict_values[col[2]]) + values.append(dict_values[col.name]) except KeyError: raise KeyError( 'Column name `%s` not found in bound dict.' % - (col[2])) + (col.name)) # ensure a 1-to-1 dict keys to columns relationship if len(dict_values) != len(col_meta): # find expected columns - columns = set() - for col in col_meta: - columns.add(col[2]) + columns = set(col.name for col in col_meta) # generate error message if len(dict_values) > len(col_meta): @@ -516,17 +514,12 @@ def bind(self, values): if value is None: self.values.append(None) else: - col_type = col_spec[-1] - try: - self.values.append(col_type.serialize(value, proto_version)) + self.values.append(col_spec.type.serialize(value, proto_version)) except (TypeError, struct.error) as exc: - col_name = col_spec[2] - expected_type = col_type actual_type = type(value) - message = ('Received an argument of invalid type for column "%s". 
' - 'Expected: %s, Got: %s; (%s)' % (col_name, expected_type, actual_type, exc)) + 'Expected: %s, Got: %s; (%s)' % (col_spec.name, col_spec.type, actual_type, exc)) raise TypeError(message) return self diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index 3fce7b9e91..4cc5938fcf 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -18,8 +18,9 @@ import unittest # noqa from cassandra.encoder import Encoder -from cassandra.query import bind_params, ValueSequence -from cassandra.query import PreparedStatement, BoundStatement +from cassandra.protocol import ColumnMetadata +from cassandra.query import (bind_params, ValueSequence, PreparedStatement, + BoundStatement) from cassandra.cqltypes import Int32Type from cassandra.util import OrderedDict @@ -80,8 +81,8 @@ def test_invalid_argument_type(self): column_family = 'cf1' column_metadata = [ - (keyspace, column_family, 'foo1', Int32Type), - (keyspace, column_family, 'foo2', Int32Type) + ColumnMetadata(keyspace, column_family, 'foo1', Int32Type), + ColumnMetadata(keyspace, column_family, 'foo2', Int32Type) ] prepared_statement = PreparedStatement(column_metadata=column_metadata, @@ -119,8 +120,8 @@ def test_inherit_fetch_size(self): column_family = 'cf1' column_metadata = [ - (keyspace, column_family, 'foo1', Int32Type), - (keyspace, column_family, 'foo2', Int32Type) + ColumnMetadata(keyspace, column_family, 'foo1', Int32Type), + ColumnMetadata(keyspace, column_family, 'foo2', Int32Type) ] prepared_statement = PreparedStatement(column_metadata=column_metadata, @@ -138,8 +139,8 @@ def test_too_few_parameters_for_key(self): column_family = 'cf1' column_metadata = [ - (keyspace, column_family, 'foo1', Int32Type), - (keyspace, column_family, 'foo2', Int32Type) + ColumnMetadata(keyspace, column_family, 'foo1', Int32Type), + ColumnMetadata(keyspace, column_family, 'foo2', Int32Type) ] prepared_statement = 
PreparedStatement(column_metadata=column_metadata, From 0b8b37ba89930b08a8fb29a6a2f064656dedad6c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 20 May 2015 16:55:13 -0500 Subject: [PATCH 1443/3726] Distinguish btw NULL and UNSET when binding, proto v4+ PYTHON-317 --- cassandra/protocol.py | 3 + cassandra/query.py | 104 +++++++++++++----- docs/api/cassandra/query.rst | 3 + .../standard/test_prepared_statements.py | 15 ++- 4 files changed, 93 insertions(+), 32 deletions(-) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 9fbec28520..13078b46fd 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -64,6 +64,7 @@ class InternalError(Exception): _message_types_by_name = {} _message_types_by_opcode = {} +_UNSET_VALUE = object() class _RegisterMessageType(type): def __init__(cls, name, bases, dct): @@ -1115,6 +1116,8 @@ def read_value(f): def write_value(f, v): if v is None: write_int(f, -1) + elif v is _UNSET_VALUE: + write_int(f, -2) else: write_int(f, len(v)) f.write(v) diff --git a/cassandra/query.py b/cassandra/query.py index 55da7611ee..374808cd0e 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -24,16 +24,30 @@ import struct import time import six +from six.moves import range from cassandra import ConsistencyLevel, OperationTimedOut from cassandra.util import unix_time_from_uuid1 from cassandra.encoder import Encoder import cassandra.encoder +from cassandra.protocol import _UNSET_VALUE from cassandra.util import OrderedDict import logging log = logging.getLogger(__name__) +UNSET_VALUE = _UNSET_VALUE +""" +Specifies an unset value when binding a prepared statement. + +Unset values are ignored, allowing prepared statements to be used without specify + +See https://issues.apache.org/jira/browse/CASSANDRA-7304 for further details on semantics. + +.. 
versionadded:: 2.6.0 + +Only valid when using native protocol v4+ +""" NON_ALPHA_REGEX = re.compile('[^a-zA-Z0-9]') START_BADCHAR_REGEX = re.compile('^[^a-zA-Z0-9]*') @@ -350,6 +364,7 @@ class PreparedStatement(object): keyspace = None # change to prepared_keyspace in major release routing_key_indexes = None + _routing_key_index_set = None consistency_level = None serial_consistency_level = None @@ -403,11 +418,16 @@ def from_message(cls, query_id, column_metadata, pk_indexes, cluster_metadata, q def bind(self, values): """ Creates and returns a :class:`BoundStatement` instance using `values`. - The `values` parameter **must** be a sequence, such as a tuple or list, - even if there is only one value to bind. + + See :meth:`BoundStatement.bind` for rules on input ``values``. """ return BoundStatement(self).bind(values) + def is_routing_key_index(self, i): + if self._routing_key_index_set is None: + self._routing_key_index_set = set(self.routing_key_indexes) if self.routing_key_indexes else set() + return i in self._routing_key_index_set + def __str__(self): consistency = ConsistencyLevel.value_to_name.get(self.consistency_level, 'Not Set') return (u'' % @@ -455,64 +475,77 @@ def bind(self, values): """ Binds a sequence of values for the prepared statement parameters and returns this instance. Note that `values` *must* be: + * a sequence, even if you are only binding one value, or * a dict that relates 1-to-1 between dict keys and columns + + .. versionchanged:: 2.6.0 + + :data:`~.UNSET_VALUE` was introduced. These can be bound as positional parameters + in a sequence, or by name in a dict. Additionally, when using protocol v4+: + + * short sequences will be extended to match bind parameters with UNSET_VALUE + * names may be omitted from a dict with UNSET_VALUE implied. 
+ """ if values is None: values = () - col_meta = self.prepared_statement.column_metadata - proto_version = self.prepared_statement.protocol_version + col_meta = self.prepared_statement.column_metadata + col_meta_len = len(col_meta) + value_len = len(values) # special case for binding dicts if isinstance(values, dict): - dict_values = values + unbound_values = values.copy() values = [] # sort values accordingly for col in col_meta: try: - values.append(dict_values[col.name]) + values.append(unbound_values.pop(col.name)) except KeyError: - raise KeyError( - 'Column name `%s` not found in bound dict.' % - (col.name)) + if proto_version >= 4: + values.append(UNSET_VALUE) + else: + raise KeyError( + 'Column name `%s` not found in bound dict.' % + (col.name)) - # ensure a 1-to-1 dict keys to columns relationship - if len(dict_values) != len(col_meta): - # find expected columns - columns = set(col.name for col in col_meta) + if unbound_values: + raise ValueError("Unexpected arguments provided to bind(): %s" % unbound_values.keys()) - # generate error message - if len(dict_values) > len(col_meta): - difference = set(dict_values.keys()).difference(columns) - msg = "Too many arguments provided to bind() (got %d, expected %d). " + \ - "Unexpected keys %s." - else: - difference = set(columns).difference(dict_values.keys()) - msg = "Too few arguments provided to bind() (got %d, expected %d). " + \ - "Expected keys %s." + value_len = len(values) - # exit with error message - msg = msg % (len(values), len(col_meta), difference) - raise ValueError(msg) + if value_len < col_meta_len: + columns = set(col.name for col in col_meta) + difference = set(columns).difference(dict_values.keys()) + raise ValueError("Too few arguments provided to bind() (got %d, expected %d). " + "Missing keys %s." 
% (value_len, col_meta_len, difference)) - if len(values) > len(col_meta): + if value_len > col_meta_len: raise ValueError( "Too many arguments provided to bind() (got %d, expected %d)" % (len(values), len(col_meta))) - if self.prepared_statement.routing_key_indexes and \ - len(values) < len(self.prepared_statement.routing_key_indexes): + # this is fail-fast for clarity pre-v4. When v4 can be assumed, + # the error will be better reported when UNSET_VALUE is implicitly added. + if proto_version < 4 and self.prepared_statement.routing_key_indexes and \ + value_len < len(self.prepared_statement.routing_key_indexes): raise ValueError( "Too few arguments provided to bind() (got %d, required %d for routing key)" % - (len(values), len(self.prepared_statement.routing_key_indexes))) + (value_len, len(self.prepared_statement.routing_key_indexes))) self.raw_values = values self.values = [] for value, col_spec in zip(values, col_meta): if value is None: self.values.append(None) + elif value is UNSET_VALUE: + if proto_version >= 4: + self._append_unset_value() + else: + raise ValueError("Attempt to bind UNSET_VALUE while using unsuitable protocol version (%d < 4)" % proto_version) else: try: self.values.append(col_spec.type.serialize(value, proto_version)) @@ -522,8 +555,21 @@ def bind(self, values): 'Expected: %s, Got: %s; (%s)' % (col_spec.name, col_spec.type, actual_type, exc)) raise TypeError(message) + if proto_version >= 4: + diff = col_meta_len - len(self.values) + if diff: + for _ in range(diff): + self._append_unset_value() + return self + def _append_unset_value(self): + next_index = len(self.values) + if self.prepared_statement.is_routing_key_index(next_index): + col_meta = self.prepared_statement.column_metadata[next_index] + raise ValueError("Cannot bind UNSET_VALUE as a part of the routing key '%s'" % col_meta.name) + self.values.append(UNSET_VALUE) + @property def routing_key(self): if not self.prepared_statement.routing_key_indexes: diff --git 
a/docs/api/cassandra/query.rst b/docs/api/cassandra/query.rst index 7e40c18457..55c56cf168 100644 --- a/docs/api/cassandra/query.rst +++ b/docs/api/cassandra/query.rst @@ -23,6 +23,9 @@ .. autoclass:: BoundStatement :members: +.. autodata:: UNSET_VALUE + :annotation: + .. autoclass:: BatchStatement (batch_type=BatchType.LOGGED, retry_policy=None, consistency_level=None) :members: diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 9240996ce8..e6f6fc8444 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -172,15 +172,24 @@ def test_too_many_bind_values_dicts(self): prepared = session.prepare( """ - INSERT INTO test3rf.test (v) VALUES (?) + INSERT INTO test3rf.test (k, v) VALUES (?, ?) """) self.assertIsInstance(prepared, PreparedStatement) - self.assertRaises(ValueError, prepared.bind, {'k': 1, 'v': 2}) + + # too many values + self.assertRaises(ValueError, prepared.bind, {'k': 1, 'v': 2, 'v2': 3}) + + # right number, but one does not belong + self.assertRaises(ValueError, prepared.bind, {'k': 1, 'v2': 3}) # also catch too few variables with dicts self.assertIsInstance(prepared, PreparedStatement) - self.assertRaises(KeyError, prepared.bind, {}) + if PROTOCOL_VERSION < 4: + self.assertRaises(KeyError, prepared.bind, {}) + else: + # post v4, the driver attempts to use UNSET_VALUE for unspecified keys + self.assertRaises(ValueError, prepared.bind, {}) cluster.shutdown() From f0f3d3191bb58ccfd7613a9bf837d8c947994129 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 21 May 2015 09:08:25 -0500 Subject: [PATCH 1444/3726] Fix typo in Function meta argument_names attribute. 
--- cassandra/metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index ece39cec16..8ede52dc1e 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1043,7 +1043,7 @@ class Function(object): An ordered list of the types for each argument to the function """ - arguemnt_names = None + argument_names = None """ An ordered list of the names of each argument to the function """ From 2302a7848fa1259a6d9a207dbc1c8553db7fd5c6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 21 May 2015 13:42:18 -0500 Subject: [PATCH 1445/3726] Add documentation for connection pool settings PYTHON-304 --- cassandra/cluster.py | 18 ++++++++++++++++++ docs/api/cassandra/cluster.rst | 8 ++++++++ 2 files changed, 26 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 07222d84f0..2a94378a3b 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -660,6 +660,13 @@ def get_min_requests_per_connection(self, host_distance): return self._min_requests_per_connection[host_distance] def set_min_requests_per_connection(self, host_distance, min_requests): + """ + Sets a threshold for concurrent requests per connection, below which + connections will be considered for disposal (down to core connections; + see :meth:`~Cluster.set_core_connections_per_host`). + + Pertains to connection pool management in protocol versions {1,2}. + """ if self.protocol_version >= 3: raise UnsupportedOperation( "Cluster.set_min_requests_per_connection() only has an effect " @@ -670,6 +677,13 @@ def get_max_requests_per_connection(self, host_distance): return self._max_requests_per_connection[host_distance] def set_max_requests_per_connection(self, host_distance, max_requests): + """ + Sets a threshold for concurrent requests per connection, above which new + connections will be created to a host (up to max connections; + see :meth:`~Cluster.set_max_connections_per_host`). 
+ + Pertains to connection pool management in protocol versions {1,2}. + """ if self.protocol_version >= 3: raise UnsupportedOperation( "Cluster.set_max_requests_per_connection() only has an effect " @@ -695,6 +709,10 @@ def set_core_connections_per_host(self, host_distance, core_connections): The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for :attr:`~HostDistance.REMOTE`. + Protocol version 1 and 2 are limited in the number of concurrent + requests they can send per connection. The driver implements connection + pooling to support higher levels of concurrency. + If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this is not supported (there is always one connection per host, unless the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`) diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 52e8469600..c3d71e107e 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -57,6 +57,14 @@ .. automethod:: unregister_listener + .. automethod:: set_max_requests_per_connection + + .. automethod:: get_max_requests_per_connection + + .. automethod:: set_min_requests_per_connection + + .. automethod:: get_min_requests_per_connection + .. automethod:: get_core_connections_per_host .. 
automethod:: set_core_connections_per_host From a5ca5ee96b213f94df94b8a7d93c55f6389b999a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 21 May 2015 15:15:41 -0500 Subject: [PATCH 1446/3726] Add synchronous refresh mehtods for UDF, UDA --- cassandra/__init__.py | 6 +++--- cassandra/cluster.py | 22 ++++++++++++++++++++++ docs/api/cassandra/cluster.rst | 4 ++++ 3 files changed, 29 insertions(+), 3 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index eff836e7f2..3f61909664 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -135,7 +135,7 @@ def __init__(self, name, type_signature): @property def signature(self): """ - function signatue string in the form 'name([type0[,type1[...]]])' + function signature string in the form 'name([type0[,type1[...]]])' can be used to uniquely identify overloaded function names within a keyspace """ @@ -158,7 +158,7 @@ class UserFunctionDescriptor(SignatureDescriptor): type_signature = None """ - Ordered list of CQL argument type name comprising the type signature + Ordered list of CQL argument type names comprising the type signature """ @@ -174,7 +174,7 @@ class UserAggregateDescriptor(SignatureDescriptor): type_signature = None """ - Ordered list of CQL argument type name comprising the type signature + Ordered list of CQL argument type names comprising the type signature """ diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 07222d84f0..84d60c17de 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1262,6 +1262,28 @@ def refresh_user_type_metadata(self, keyspace, user_type, max_schema_agreement_w if not self.control_connection.refresh_schema(keyspace, usertype=user_type, schema_agreement_wait=max_schema_agreement_wait): raise Exception("User Type metadata was not refreshed. See log for details.") + def refresh_user_function_metadata(self, keyspace, function, max_schema_agreement_wait=None): + """ + Synchronously refresh user defined function metadata. 
+ + ``function`` is a :class:`cassandra.UserFunctionDescriptor`. + + See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior + """ + if not self.control_connection.refresh_schema(keyspace, function=function, schema_agreement_wait=max_schema_agreement_wait): + raise Exception("User Function metadata was not refreshed. See log for details.") + + def refresh_user_aggregate_metadata(self, keyspace, aggregate, max_schema_agreement_wait=None): + """ + Synchronously refresh user defined aggregate metadata. + + ``aggregate`` is a :class:`cassandra.UserAggregateDescriptor`. + + See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior + """ + if not self.control_connection.refresh_schema(keyspace, aggregate=aggregate, schema_agreement_wait=max_schema_agreement_wait): + raise Exception("User Aggregate metadata was not refreshed. See log for details.") + def refresh_nodes(self): """ Synchronously refresh the node list and token metadata diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 52e8469600..d66d2aa1ea 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -73,6 +73,10 @@ .. automethod:: refresh_user_type_metadata + .. automethod:: refresh_user_function_metadata + + .. automethod:: refresh_user_aggregate_metadata + .. automethod:: refresh_schema .. 
automethod:: refresh_nodes From 7d628a2b4feae23e63c25807afe4653b6525975e Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Thu, 21 May 2015 15:19:07 -0700 Subject: [PATCH 1447/3726] [PYTHON-291] Tests for refresh schema metadata --- tests/integration/standard/test_cluster.py | 21 +- tests/integration/standard/test_metadata.py | 263 +++++++++++++++++++- 2 files changed, 270 insertions(+), 14 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 146859ba81..bf6bb1d476 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -248,7 +248,7 @@ def test_refresh_schema(self): original_meta = cluster.metadata.keyspaces # full schema refresh, with wait - cluster.refresh_schema() + cluster.refresh_schema_metadata() self.assertIsNot(original_meta, cluster.metadata.keyspaces) self.assertEqual(original_meta, cluster.metadata.keyspaces) @@ -262,7 +262,7 @@ def test_refresh_schema_keyspace(self): original_system_meta = original_meta['system'] # only refresh one keyspace - cluster.refresh_schema(keyspace='system') + cluster.refresh_keyspace_metadata('system') current_meta = cluster.metadata.keyspaces self.assertIs(original_meta, current_meta) current_system_meta = current_meta['system'] @@ -279,7 +279,7 @@ def test_refresh_schema_table(self): original_system_schema_meta = original_system_meta.tables['schema_columnfamilies'] # only refresh one table - cluster.refresh_schema(keyspace='system', table='schema_columnfamilies') + cluster.refresh_table_metadata('system', 'schema_columnfamilies') current_meta = cluster.metadata.keyspaces current_system_meta = current_meta['system'] current_system_schema_meta = current_system_meta.tables['schema_columnfamilies'] @@ -309,7 +309,7 @@ def test_refresh_schema_type(self): original_type_meta = original_test1rf_meta.user_types[type_name] # only refresh one type - cluster.refresh_schema(keyspace='test1rf', usertype=type_name) 
+ cluster.refresh_user_type_metadata('test1rf', type_name) current_meta = cluster.metadata.keyspaces current_test1rf_meta = current_meta[keyspace_name] current_type_meta = current_test1rf_meta.user_types[type_name] @@ -345,7 +345,7 @@ def test_refresh_schema_no_wait(self): # cluster agreement wait used for refresh original_meta = c.metadata.keyspaces start_time = time.time() - self.assertRaisesRegexp(Exception, r"Schema was not refreshed.*", c.refresh_schema) + self.assertRaisesRegexp(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata) end_time = time.time() self.assertGreaterEqual(end_time - start_time, agreement_timeout) self.assertIs(original_meta, c.metadata.keyspaces) @@ -353,7 +353,7 @@ def test_refresh_schema_no_wait(self): # refresh wait overrides cluster value original_meta = c.metadata.keyspaces start_time = time.time() - c.refresh_schema(max_schema_agreement_wait=0) + c.refresh_schema_metadata(max_schema_agreement_wait=0) end_time = time.time() self.assertLess(end_time - start_time, agreement_timeout) self.assertIsNot(original_meta, c.metadata.keyspaces) @@ -373,7 +373,7 @@ def test_refresh_schema_no_wait(self): # cluster agreement wait used for refresh original_meta = c.metadata.keyspaces start_time = time.time() - c.refresh_schema() + c.refresh_schema_metadata() end_time = time.time() self.assertLess(end_time - start_time, refresh_threshold) self.assertIsNot(original_meta, c.metadata.keyspaces) @@ -382,7 +382,8 @@ def test_refresh_schema_no_wait(self): # refresh wait overrides cluster value original_meta = c.metadata.keyspaces start_time = time.time() - self.assertRaisesRegexp(Exception, r"Schema was not refreshed.*", c.refresh_schema, max_schema_agreement_wait=agreement_timeout) + self.assertRaisesRegexp(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata, + max_schema_agreement_wait=agreement_timeout) end_time = time.time() self.assertGreaterEqual(end_time - start_time, agreement_timeout) 
self.assertIs(original_meta, c.metadata.keyspaces) @@ -566,8 +567,8 @@ def test_pool_management(self): session.execute('USE system_traces') # refresh schema - cluster.refresh_schema() - cluster.refresh_schema(max_schema_agreement_wait=0) + cluster.refresh_schema_metadata() + cluster.refresh_schema_metadata(max_schema_agreement_wait=0) # submit schema refresh future = cluster.submit_schema_refresh() diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 73ffe134ce..69d599f534 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -24,7 +24,8 @@ import sys import traceback -from cassandra import AlreadyExists, OperationTimedOut, SignatureDescriptor +from cassandra import AlreadyExists, OperationTimedOut, SignatureDescriptor, UserFunctionDescriptor, \ + UserAggregateDescriptor from cassandra.cluster import Cluster from cassandra.cqltypes import DoubleType, Int32Type, ListType, UTF8Type, MapType @@ -115,14 +116,14 @@ def check_create_statement(self, tablemeta, original): self.session.execute(recreate) def get_table_metadata(self): - self.cluster.control_connection.refresh_schema() + self.cluster.refresh_table_metadata(self.ksname, self.cfname) return self.cluster.metadata.keyspaces[self.ksname].tables[self.cfname] def test_basic_table_meta_properties(self): create_statement = self.make_create_statement(["a"], [], ["b", "c"]) self.session.execute(create_statement) - self.cluster.control_connection.refresh_schema() + self.cluster.refresh_schema_metadata() meta = self.cluster.metadata self.assertNotEqual(meta.cluster_name, None) @@ -326,6 +327,260 @@ def test_compression_disabled(self): tablemeta = self.get_table_metadata() self.assertIn("compression = {}", tablemeta.export_as_string()) + def test_refresh_schema_metadata(self): + """ + test for synchronously refreshing all cluster metadata + + test_refresh_schema_metadata tests all cluster metadata is refreshed 
when calling refresh_schema_metadata(). + It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled + for schema change push events. It then alters the cluster, creating a new keyspace, using the first cluster + object, and verifies that the cluster metadata has not changed in the second cluster object. It then calls + refresh_schema_metadata() and verifies that the cluster metadata is updated in the second cluster object. + Similarly, it then proceeds to altering keyspace, table, UDT, UDF, and UDA metadata and subsequently verfies + that these metadata is updated when refresh_schema_metadata() is called. + + @since 2.6.0 + @jira_ticket PYTHON-291 + @expected_result Cluster, keyspace, table, UDT, UDF, and UDA metadata should be refreshed when refresh_schema_metadata() is called. + + @test_category metadata + """ + + cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) + cluster2.connect() + + self.assertNotIn("new_keyspace", cluster2.metadata.keyspaces) + + # Cluster metadata modification + self.session.execute("CREATE KEYSPACE new_keyspace WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}") + self.assertNotIn("new_keyspace", cluster2.metadata.keyspaces) + + cluster2.refresh_schema_metadata() + self.assertIn("new_keyspace", cluster2.metadata.keyspaces) + + # Keyspace metadata modification + self.session.execute("ALTER KEYSPACE {0} WITH durable_writes = false".format(self.ksname)) + self.assertTrue(cluster2.metadata.keyspaces[self.ksname].durable_writes) + cluster2.refresh_schema_metadata() + self.assertFalse(cluster2.metadata.keyspaces[self.ksname].durable_writes) + + # Table metadata modification + table_name = "test" + self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.ksname, table_name)) + cluster2.refresh_schema_metadata() + + self.session.execute("ALTER TABLE {0}.{1} ADD c double".format(self.ksname, table_name)) + 
self.assertNotIn("c", cluster2.metadata.keyspaces[self.ksname].tables[table_name].columns) + cluster2.refresh_schema_metadata() + self.assertIn("c", cluster2.metadata.keyspaces[self.ksname].tables[table_name].columns) + + if PROTOCOL_VERSION >= 3: + # UDT metadata modification + self.session.execute("CREATE TYPE {0}.user (age int, name text)".format(self.ksname)) + self.assertEqual(cluster2.metadata.keyspaces[self.ksname].user_types, {}) + cluster2.refresh_schema_metadata() + self.assertIn("user", cluster2.metadata.keyspaces[self.ksname].user_types) + + if PROTOCOL_VERSION >= 4: + # UDF metadata modification + self.session.execute("""CREATE FUNCTION {0}.sum_int(key int, val int) + RETURNS NULL ON NULL INPUT + RETURNS int + LANGUAGE javascript AS 'key + val';""".format(self.ksname)) + + self.assertEqual(cluster2.metadata.keyspaces[self.ksname].functions, {}) + cluster2.refresh_schema_metadata() + self.assertIn("sum_int(int,int)", cluster2.metadata.keyspaces[self.ksname].functions) + + # UDA metadata modification + self.session.execute("""CREATE AGGREGATE {0}.sum_agg(int) + SFUNC sum_int + STYPE int + INITCOND 0""" + .format(self.ksname)) + + self.assertEqual(cluster2.metadata.keyspaces[self.ksname].aggregates, {}) + cluster2.refresh_schema_metadata() + self.assertIn("sum_agg(int)", cluster2.metadata.keyspaces[self.ksname].aggregates) + + # Cluster metadata modification + self.session.execute("DROP KEYSPACE new_keyspace") + self.assertIn("new_keyspace", cluster2.metadata.keyspaces) + + cluster2.refresh_schema_metadata() + self.assertNotIn("new_keyspace", cluster2.metadata.keyspaces) + + cluster2.shutdown() + + def test_refresh_keyspace_metadata(self): + """ + test for synchronously refreshing keyspace metadata + + test_refresh_keyspace_metadata tests that keyspace metadata is refreshed when calling refresh_keyspace_metadata(). 
+ It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled + for schema change push events. It then alters the keyspace, disabling durable_writes, using the first cluster + object, and verifies that the keyspace metadata has not changed in the second cluster object. Finally, it calls + refresh_keyspace_metadata() and verifies that the keyspace metadata is updated in the second cluster object. + + @since 2.6.0 + @jira_ticket PYTHON-291 + @expected_result Keyspace metadata should be refreshed when refresh_keyspace_metadata() is called. + + @test_category metadata + """ + + cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) + cluster2.connect() + + self.assertTrue(cluster2.metadata.keyspaces[self.ksname].durable_writes) + self.session.execute("ALTER KEYSPACE {0} WITH durable_writes = false".format(self.ksname)) + self.assertTrue(cluster2.metadata.keyspaces[self.ksname].durable_writes) + cluster2.refresh_keyspace_metadata(self.ksname) + self.assertFalse(cluster2.metadata.keyspaces[self.ksname].durable_writes) + + cluster2.shutdown() + + def test_refresh_table_metatadata(self): + """ + test for synchronously refreshing table metadata + + test_refresh_table_metatadata tests that table metadata is refreshed when calling test_refresh_table_metatadata(). + It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled + for schema change push events. It then alters the table, adding a new column, using the first cluster + object, and verifies that the table metadata has not changed in the second cluster object. Finally, it calls + test_refresh_table_metatadata() and verifies that the table metadata is updated in the second cluster object. + + @since 2.6.0 + @jira_ticket PYTHON-291 + @expected_result Table metadata should be refreshed when refresh_table_metadata() is called. 
+ + @test_category metadata + """ + + table_name = "test" + self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.ksname, table_name)) + + cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) + cluster2.connect() + + self.assertNotIn("c", cluster2.metadata.keyspaces[self.ksname].tables[table_name].columns) + self.session.execute("ALTER TABLE {0}.{1} ADD c double".format(self.ksname, table_name)) + self.assertNotIn("c", cluster2.metadata.keyspaces[self.ksname].tables[table_name].columns) + + cluster2.refresh_table_metadata(self.ksname, table_name) + self.assertIn("c", cluster2.metadata.keyspaces[self.ksname].tables[table_name].columns) + + cluster2.shutdown() + + def test_refresh_user_type_metadata(self): + """ + test for synchronously refreshing UDT metadata in keyspace + + test_refresh_user_type_metadata tests that UDT metadata in a keyspace is refreshed when calling refresh_user_type_metadata(). + It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled + for schema change push events. It then alters the keyspace, creating a new UDT, using the first cluster + object, and verifies that the UDT metadata has not changed in the second cluster object. Finally, it calls + refresh_user_type_metadata() and verifies that the UDT metadata in the keyspace is updated in the second cluster object. + + @since 2.6.0 + @jira_ticket PYTHON-291 + @expected_result UDT metadata in the keyspace should be refreshed when refresh_user_type_metadata() is called. 
+ + @test_category metadata + """ + + if PROTOCOL_VERSION < 3: + raise unittest.SkipTest("Protocol 3+ is required for UDTs, currently testing against {0}".format(PROTOCOL_VERSION)) + + cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) + cluster2.connect() + + self.assertEqual(cluster2.metadata.keyspaces[self.ksname].user_types, {}) + self.session.execute("CREATE TYPE {0}.user (age int, name text)".format(self.ksname)) + self.assertEqual(cluster2.metadata.keyspaces[self.ksname].user_types, {}) + + cluster2.refresh_user_type_metadata(self.ksname, "user") + self.assertIn("user", cluster2.metadata.keyspaces[self.ksname].user_types) + + cluster2.shutdown() + + def test_refresh_user_function_metadata(self): + """ + test for synchronously refreshing UDF metadata in keyspace + + test_refresh_user_function_metadata tests that UDF metadata in a keyspace is refreshed when calling + refresh_user_function_metadata(). It creates a second cluster object with schema_event_refresh_window=-1 such + that schema refreshes are disabled for schema change push events. It then alters the keyspace, creating a new + UDF, using the first cluster object, and verifies that the UDF metadata has not changed in the second cluster + object. Finally, it calls refresh_user_function_metadata() and verifies that the UDF metadata in the keyspace + is updated in the second cluster object. + + @since 2.6.0 + @jira_ticket PYTHON-291 + @expected_result UDF metadata in the keyspace should be refreshed when refresh_user_function_metadata() is called. 
+ + @test_category metadata + """ + + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest("Protocol 4+ is required for UDFs, currently testing against {0}".format(PROTOCOL_VERSION)) + + cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) + cluster2.connect() + + self.assertEqual(cluster2.metadata.keyspaces[self.ksname].functions, {}) + self.session.execute("""CREATE FUNCTION {0}.sum_int(key int, val int) + RETURNS NULL ON NULL INPUT + RETURNS int + LANGUAGE javascript AS 'key + val';""".format(self.ksname)) + + self.assertEqual(cluster2.metadata.keyspaces[self.ksname].functions, {}) + cluster2.refresh_user_function_metadata(self.ksname, UserFunctionDescriptor("sum_int", ["int", "int"])) + self.assertIn("sum_int(int,int)", cluster2.metadata.keyspaces[self.ksname].functions) + + cluster2.shutdown() + + def test_refresh_user_aggregate_metadata(self): + """ + test for synchronously refreshing UDA metadata in keyspace + + test_refresh_user_aggregate_metadata tests that UDA metadata in a keyspace is refreshed when calling + refresh_user_aggregate_metadata(). It creates a second cluster object with schema_event_refresh_window=-1 such + that schema refreshes are disabled for schema change push events. It then alters the keyspace, creating a new + UDA, using the first cluster object, and verifies that the UDA metadata has not changed in the second cluster + object. Finally, it calls refresh_user_aggregate_metadata() and verifies that the UDF metadata in the keyspace + is updated in the second cluster object. + + @since 2.6.0 + @jira_ticket PYTHON-291 + @expected_result UDA metadata in the keyspace should be refreshed when refresh_user_aggregate_metadata() is called. 
+ + @test_category metadata + """ + + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest("Protocol 4+ is required for UDAs, currently testing against {0}".format(PROTOCOL_VERSION)) + + cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1) + cluster2.connect() + + self.assertEqual(cluster2.metadata.keyspaces[self.ksname].aggregates, {}) + self.session.execute("""CREATE FUNCTION {0}.sum_int(key int, val int) + RETURNS NULL ON NULL INPUT + RETURNS int + LANGUAGE javascript AS 'key + val';""".format(self.ksname)) + + self.session.execute("""CREATE AGGREGATE {0}.sum_agg(int) + SFUNC sum_int + STYPE int + INITCOND 0""" + .format(self.ksname)) + + self.assertEqual(cluster2.metadata.keyspaces[self.ksname].aggregates, {}) + cluster2.refresh_user_aggregate_metadata(self.ksname, UserAggregateDescriptor("sum_agg", ["int"])) + self.assertIn("sum_agg(int)", cluster2.metadata.keyspaces[self.ksname].aggregates) + + cluster2.shutdown() class TestCodeCoverage(unittest.TestCase): @@ -982,7 +1237,7 @@ def test_index_updates(self): self.session.execute("DROP INDEX a_idx") # temporarily synchronously refresh the schema metadata, until CASSANDRA-9391 is merged in - self.cluster.refresh_schema(self.keyspace_name, self.table_name) + self.cluster.refresh_table_metadata(self.keyspace_name, self.table_name) ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name] table_meta = ks_meta.tables[self.table_name] From e4d39fbf6f5a8c7d125dc68c4aada665f810bd0b Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Thu, 21 May 2015 17:05:40 -0700 Subject: [PATCH 1448/3726] [PYTHON-207] Test for automatic repreparation error --- .../standard/test_prepared_statements.py | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 9240996ce8..a2a9beb8a8 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ 
b/tests/integration/standard/test_prepared_statements.py @@ -325,3 +325,31 @@ def test_async_binding_dicts(self): self.assertEqual(results[0].v, None) cluster.shutdown() + + def test_raise_error_on_prepared_statement_execution_dropped_table(self): + """ + test for error in executing prepared statement on a dropped table + + test_raise_error_on_execute_prepared_statement_dropped_table tests that an InvalidRequest is raised when a + prepared statement is executed after its corresponding table is dropped. This happens because if a prepared + statement is invalid, the driver attempts to automatically re-prepare it on a non-existing table. + + @expected_errors InvalidRequest If a prepared statement is executed on a dropped table + + @since 2.6.0 + @jira_ticket PYTHON-207 + @expected_result InvalidRequest error should be raised upon prepared statement execution. + + @test_category prepared_statements + """ + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + session = cluster.connect("test3rf") + + session.execute("CREATE TABLE error_test (k int PRIMARY KEY, v int)") + prepared = session.prepare("SELECT * FROM error_test WHERE k=?") + session.execute("DROP TABLE error_test") + + with self.assertRaises(InvalidRequest): + session.execute(prepared, [0]) + + cluster.shutdown() \ No newline at end of file From 44016c8136a79c82d6ce133f210a02127c22e3af Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 22 May 2015 09:51:08 -0500 Subject: [PATCH 1449/3726] Add FAQ section to docs PYTHON-115 --- docs/cqlengine/faq.rst | 8 ++-- docs/faq.rst | 83 ++++++++++++++++++++++++++++++++++++++++++ docs/index.rst | 6 +++ 3 files changed, 93 insertions(+), 4 deletions(-) create mode 100644 docs/faq.rst diff --git a/docs/cqlengine/faq.rst b/docs/cqlengine/faq.rst index 6342538de4..dcaefae22c 100644 --- a/docs/cqlengine/faq.rst +++ b/docs/cqlengine/faq.rst @@ -2,15 +2,15 @@ Frequently Asked Questions ========================== -Q: Why don't updates work correctly on models instantiated 
as Model(field=value, field2=value2)? +Why don't updates work correctly on models instantiated as Model(field=value, field2=value2)? ------------------------------------------------------------------------------------------------ -A: The recommended way to create new rows is with the models .create method. The values passed into a model's init method are interpreted by the model as the values as they were read from a row. This allows the model to "know" which rows have changed since the row was read out of cassandra, and create suitable update statements. +The recommended way to create new rows is with the models .create method. The values passed into a model's init method are interpreted by the model as the values as they were read from a row. This allows the model to "know" which rows have changed since the row was read out of cassandra, and create suitable update statements. -Q: How to preserve ordering in batch query? +How to preserve ordering in batch query? ------------------------------------------- -A: Statement Ordering is not supported by CQL3 batches. Therefore, +Statement Ordering is not supported by CQL3 batches. Therefore, once cassandra needs resolving conflict(Updating the same column in one batch), The algorithm below would be used. diff --git a/docs/faq.rst b/docs/faq.rst new file mode 100644 index 0000000000..56cb648a24 --- /dev/null +++ b/docs/faq.rst @@ -0,0 +1,83 @@ +Frequently Asked Questions +========================== + +See also :doc:`cqlengine FAQ ` + +Why do connections or IO operations timeout in my WSGI application? +------------------------------------------------------------------- +Depending on your application process model, it may be forking after driver Session is created. Most IO reactors do not handle this, and problems will manifest as timeouts. + +To avoid this, make sure to create sessions per process, after the fork. Using uWSGI and Flask for example: + +.. 
code-block:: python + + from flask import Flask + from uwsgidecorators import postfork + from cassandra.cluster import Cluster + + session = None + prepared = None + + @postfork + def connect(): + global session, prepared + session = Cluster().connect() + prepared = session.prepare("SELECT release_version FROM system.local WHERE key=?") + + app = Flask(__name__) + + @app.route('/') + def server_version(): + row = session.execute(prepared, ('local',))[0] + return row.release_version + +uWSGI provides a ``postfork`` hook you can use to create sessions and prepared statements after the child process forks. + +How do I trace a request? +------------------------- +Request tracing can be turned on for any request by setting ``trace=True`` in :meth:`.Session.execute_async`. View the results by waiting on the future, then :meth:`.ResponseFuture.get_query_trace`. +Since tracing is done asynchronously to the request, this method polls until the trace is complete before querying data. + +.. code-block:: python + + >>> future = session.execute_async("SELECT * FROM system.local", trace=True) + >>> result = future.result() + >>> trace = future.get_query_trace() + >>> for e in trace.events: + >>> print e.source_elapsed, e.description + + 0:00:00.000077 Parsing select * from system.local + 0:00:00.000153 Preparing statement + 0:00:00.000309 Computing ranges to query + 0:00:00.000368 Submitting range requests on 1 ranges with a concurrency of 1 (279.77142 rows per range expected) + 0:00:00.000422 Submitted 1 concurrent range requests covering 1 ranges + 0:00:00.000480 Executing seq scan across 1 sstables for (min(-9223372036854775808), min(-9223372036854775808)) + 0:00:00.000669 Read 1 live and 0 tombstone cells + 0:00:00.000755 Scanned 1 rows and matched 1 + +``trace`` is a :class:`QueryTrace` object. + +How do I determine the replicas for a query? 
+---------------------------------------------- +With prepared statements, the replicas are obtained by ``routing_key``, based on current cluster token metadata: + +.. code-block:: python + + >>> prepared = session.prepare("SELECT * FROM example.t WHERE key=?") + >>> bound = prepared.bind((1,)) + >>> replicas = cluster.metadata.get_replicas(bound.keyspace, bound.routing_key) + >>> for h in replicas: + >>> print h.address + 127.0.0.1 + 127.0.0.2 + +``replicas`` is a list of :class:`Host` objects. + +How does the driver manage request retries? +------------------------------------------- +By default, retries are managed by the :attr:`.Cluster.default_retry_policy` set on the session Cluster. It can also +be specialized per statement by setting :attr:`.Statement.retry_policy`. + +Retries are presently attempted on the same coordinator, but this may change in the future. + +Please see :class:`.policies.RetryPolicy` for further details. diff --git a/docs/index.rst b/docs/index.rst index e28b00685b..0b89c369da 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -39,6 +39,9 @@ Contents :doc:`security` An overview of the security features of the driver. +:doc:`faq` + A collection of Frequently Asked Questions + .. toctree:: :hidden: @@ -51,9 +54,12 @@ Contents security user_defined_types object_mapper + faq Getting Help ------------ +Visit the :doc:`FAQ section ` in this documentation. + Please send questions to the `mailing list `_. Alternatively, you can use IRC. Connect to the #datastax-drivers channel on irc.freenode.net. 
From 88e927fd299e22d669d1906bbbb53aad2a09c350 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 22 May 2015 13:03:04 -0500 Subject: [PATCH 1450/3726] Remove dead conditional in BoundStatement.bind --- cassandra/query.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/cassandra/query.py b/cassandra/query.py index 374808cd0e..6709cbeff6 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -392,7 +392,6 @@ def from_message(cls, query_id, column_metadata, pk_indexes, cluster_metadata, q if pk_indexes: routing_key_indexes = pk_indexes else: - partition_key_columns = None routing_key_indexes = None first_col = column_metadata[0] @@ -512,16 +511,10 @@ def bind(self, values): 'Column name `%s` not found in bound dict.' % (col.name)) - if unbound_values: - raise ValueError("Unexpected arguments provided to bind(): %s" % unbound_values.keys()) - value_len = len(values) - if value_len < col_meta_len: - columns = set(col.name for col in col_meta) - difference = set(columns).difference(dict_values.keys()) - raise ValueError("Too few arguments provided to bind() (got %d, expected %d). " - "Missing keys %s." 
% (value_len, col_meta_len, difference)) + if unbound_values: + raise ValueError("Unexpected arguments provided to bind(): %s" % unbound_values.keys()) if value_len > col_meta_len: raise ValueError( From effd1a9fc4c0186b1ca6a2677a0a1d44982eef11 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 22 May 2015 13:03:35 -0500 Subject: [PATCH 1451/3726] Unit tests for binding NULL vs UNSET in protocol v4 --- tests/unit/test_parameter_binding.py | 139 +++++++++++++++++++-------- 1 file changed, 100 insertions(+), 39 deletions(-) diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index 4cc5938fcf..6b84260279 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -20,7 +20,7 @@ from cassandra.encoder import Encoder from cassandra.protocol import ColumnMetadata from cassandra.query import (bind_params, ValueSequence, PreparedStatement, - BoundStatement) + BoundStatement, UNSET_VALUE) from cassandra.cqltypes import Int32Type from cassandra.util import OrderedDict @@ -74,42 +74,42 @@ def test_float_precision(self): self.assertEqual(float(bind_params("%s", (f,), Encoder())), f) -class BoundStatementTestCase(unittest.TestCase): +class BoundStatementTestV1(unittest.TestCase): - def test_invalid_argument_type(self): - keyspace = 'keyspace1' - column_family = 'cf1' + protocol_version=1 - column_metadata = [ - ColumnMetadata(keyspace, column_family, 'foo1', Int32Type), - ColumnMetadata(keyspace, column_family, 'foo2', Int32Type) - ] - - prepared_statement = PreparedStatement(column_metadata=column_metadata, - query_id=None, - routing_key_indexes=[], - query=None, - keyspace=keyspace, - protocol_version=2) - bound_statement = BoundStatement(prepared_statement=prepared_statement) - - values = ['nonint', 1] + @classmethod + def setUpClass(cls): + cls.prepared = PreparedStatement(column_metadata=[ + ColumnMetadata('keyspace', 'cf', 'rk0', Int32Type), + ColumnMetadata('keyspace', 'cf', 'rk1', Int32Type), 
+ ColumnMetadata('keyspace', 'cf', 'ck0', Int32Type), + ColumnMetadata('keyspace', 'cf', 'v0', Int32Type) + ], + query_id=None, + routing_key_indexes=[1, 0], + query=None, + keyspace='keyspace', + protocol_version=cls.protocol_version) + cls.bound = BoundStatement(prepared_statement=cls.prepared) + def test_invalid_argument_type(self): + values = (0, 0, 0, 'string not int') try: - bound_statement.bind(values) + self.bound.bind(values) except TypeError as e: - self.assertIn('foo1', str(e)) + self.assertIn('v0', str(e)) self.assertIn('Int32Type', str(e)) self.assertIn('str', str(e)) else: self.fail('Passed invalid type but exception was not thrown') - values = [1, ['1', '2']] + values = (['1', '2'], 0, 0, 0) try: - bound_statement.bind(values) + self.bound.bind(values) except TypeError as e: - self.assertIn('foo2', str(e)) + self.assertIn('rk0', str(e)) self.assertIn('Int32Type', str(e)) self.assertIn('list', str(e)) else: @@ -129,28 +129,89 @@ def test_inherit_fetch_size(self): routing_key_indexes=[], query=None, keyspace=keyspace, - protocol_version=2) + protocol_version=self.protocol_version) prepared_statement.fetch_size = 1234 bound_statement = BoundStatement(prepared_statement=prepared_statement) self.assertEqual(1234, bound_statement.fetch_size) - def test_too_few_parameters_for_key(self): - keyspace = 'keyspace1' - column_family = 'cf1' + def test_too_few_parameters_for_routing_key(self): + self.assertRaises(ValueError, self.prepared.bind, (1,)) - column_metadata = [ - ColumnMetadata(keyspace, column_family, 'foo1', Int32Type), - ColumnMetadata(keyspace, column_family, 'foo2', Int32Type) - ] + bound = self.prepared.bind((1, 2)) + self.assertEqual(bound.keyspace, 'keyspace') - prepared_statement = PreparedStatement(column_metadata=column_metadata, + def test_dict_missing_routing_key(self): + self.assertRaises(KeyError, self.bound.bind, {'rk0': 0, 'ck0': 0, 'v0': 0}) + self.assertRaises(KeyError, self.bound.bind, {'rk1': 0, 'ck0': 0, 'v0': 0}) + + def 
test_missing_value(self): + self.assertRaises(KeyError, self.bound.bind, {'rk0': 0, 'rk1': 0, 'ck0': 0}) + # legacy behavior: short parameters are allowed through, errored by server + #self.assertRaises(ValueError, self.bound.bind, (0, 0, 0)) + + def test_dict_extra_value(self): + self.assertRaises(ValueError, self.bound.bind, {'rk0': 0, 'rk1': 0, 'ck0': 0, 'v0': 0, 'should_not_be_here': 123}) + self.assertRaises(ValueError, self.bound.bind, (0, 0, 0, 0, 123)) + + def test_values_none(self): + # should have values + self.assertRaises(ValueError, self.bound.bind, None) + + # prepared statement with no values + prepared_statement = PreparedStatement(column_metadata=[], query_id=None, - routing_key_indexes=[0, 1], + routing_key_indexes=[], query=None, - keyspace=keyspace, - protocol_version=2) + keyspace='whatever', + protocol_version=self.protocol_version) + bound = prepared_statement.bind(None) + self.assertListEqual(bound.values, []) + + def test_bind_none(self): + self.bound.bind({'rk0': 0, 'rk1': 0, 'ck0': 0, 'v0': None}) + self.assertEqual(self.bound.values[-1], None) + + old_values = self.bound.values + self.bound.bind((0, 0, 0, None)) + self.assertIsNot(self.bound.values, old_values) + self.assertEqual(self.bound.values[-1], None) + + def test_unset_value(self): + self.assertRaises(ValueError, self.bound.bind, {'rk0': 0, 'rk1': 0, 'ck0': 0, 'v0': UNSET_VALUE}) + self.assertRaises(ValueError, self.bound.bind, (0, 0, 0, UNSET_VALUE)) + + +class BoundStatementTestV2(BoundStatementTestV1): + protocol_version=2 + + +class BoundStatementTestV3(BoundStatementTestV1): + protocol_version=3 + + +class BoundStatementTestV4(BoundStatementTestV1): + protocol_version=4 + + def test_dict_missing_routing_key(self): + # in v4 it implicitly binds UNSET_VALUE for missing items, + # UNSET_VALUE is ValueError for routing keys + self.assertRaises(ValueError, self.bound.bind, {'rk0': 0, 'ck0': 0, 'v0': 0}) + self.assertRaises(ValueError, self.bound.bind, {'rk1': 0, 'ck0': 0, 'v0': 
0}) + + def test_missing_value(self): + # in v4 missing values are UNSET_VALUE + self.bound.bind({'rk0': 0, 'rk1': 0, 'ck0': 0}) + self.assertEqual(self.bound.values[-1], UNSET_VALUE) + + old_values = self.bound.values + self.bound.bind((0, 0, 0)) + self.assertIsNot(self.bound.values, old_values) + self.assertEqual(self.bound.values[-1], UNSET_VALUE) - self.assertRaises(ValueError, prepared_statement.bind, (1,)) + def test_unset_value(self): + self.bound.bind({'rk0': 0, 'rk1': 0, 'ck0': 0, 'v0': UNSET_VALUE}) + self.assertEqual(self.bound.values[-1], UNSET_VALUE) - bound = prepared_statement.bind((1, 2)) - self.assertEqual(bound.keyspace, keyspace) + old_values = self.bound.values + self.bound.bind((0, 0, 0, UNSET_VALUE)) + self.assertEqual(self.bound.values[-1], UNSET_VALUE) From 43f1ea748ceb52d4aebfcb12f18c43583c2a46ac Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 22 May 2015 14:12:25 -0500 Subject: [PATCH 1452/3726] Integration test for binding UNSET values in v4+ PYTHON-317 --- .../standard/test_prepared_statements.py | 50 ++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index e6f6fc8444..9d44a383ba 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -22,7 +22,7 @@ from cassandra import InvalidRequest from cassandra.cluster import Cluster -from cassandra.query import PreparedStatement +from cassandra.query import PreparedStatement, UNSET_VALUE def setup_module(): @@ -222,6 +222,54 @@ def test_none_values(self): cluster.shutdown() + def test_unset_values(self): + """ + Test to validate that UNSET_VALUEs are bound, and have the expected effect + + Prepare a statement and insert all values. Then follow with execute excluding + parameters. Verify that the original values are unaffected. 
+ + @since 2.6.0 + + @jira_ticket PYTHON-317 + @expected_result UNSET_VALUE is implicitly added to bind parameters, and properly encoded, leving unset values unaffected. + + @test_category prepared_statements:binding + """ + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest("Binding UNSET values is not supported in protocol version < 4") + + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + session = cluster.connect() + + # table with at least two values so one can be used as a marker + session.execute("CREATE TABLE IF NOT EXISTS test1rf.test_unset_values (k int PRIMARY KEY, v0 int, v1 int)") + insert = session.prepare( "INSERT INTO test1rf.test_unset_values (k, v0, v1) VALUES (?, ?, ?)") + select = session.prepare( "SELECT * FROM test1rf.test_unset_values WHERE k=?") + + bind_expected = [ + # initial condition + ((0, 0, 0), (0, 0, 0)), + # unset implicit + ((0, 1,), (0, 1, 0)), + ({'k': 0, 'v0': 2}, (0, 2, 0)), + ({'k': 0, 'v1': 1}, (0, 2, 1)), + # unset explicit + ((0, 3, UNSET_VALUE), (0, 3, 1)), + ((0, UNSET_VALUE, 2), (0, 3, 2)), + ({'k': 0, 'v0': 4, 'v1': UNSET_VALUE}, (0, 4, 2)), + ({'k': 0, 'v0': UNSET_VALUE, 'v1': 3}, (0, 4, 3)), + # nulls still work + ((0, None, None), (0, None, None)), + ] + + for params, expected in bind_expected: + session.execute(insert, params) + results = session.execute(select, (0,)) + self.assertEqual(results[0], expected) + + cluster.shutdown() + def test_no_meta(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() From 5bd44a97c32d587412389fa1148ff01fba615951 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 22 May 2015 14:21:44 -0500 Subject: [PATCH 1453/3726] Fix test for wrong dict values passed to bind PYTHON-317 --- tests/integration/standard/test_prepared_statements.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 
9d44a383ba..8c50d7b747 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -163,7 +163,7 @@ def test_too_many_bind_values(self): def test_too_many_bind_values_dicts(self): """ - Ensure a ValueError is thrown when attempting to bind too many variables + Ensure an error is thrown when attempting to bind the wrong values with dict bindings """ @@ -181,7 +181,12 @@ def test_too_many_bind_values_dicts(self): self.assertRaises(ValueError, prepared.bind, {'k': 1, 'v': 2, 'v2': 3}) # right number, but one does not belong - self.assertRaises(ValueError, prepared.bind, {'k': 1, 'v2': 3}) + if PROTOCOL_VERSION < 4: + # pre v4, the driver bails with key error when 'v' is found missing + self.assertRaises(KeyError, prepared.bind, {'k': 1, 'v2': 3}) + else: + # post v4, the driver uses UNSET_VALUE for 'v' and bails when 'v2' is unbound + self.assertRaises(ValueError, prepared.bind, {'k': 1, 'v2': 3}) # also catch too few variables with dicts self.assertIsInstance(prepared, PreparedStatement) From 09505e65974af138763f46ac0bfdf24fa738a040 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 21 May 2015 17:51:43 -0500 Subject: [PATCH 1454/3726] [PYTHON-238] Added tests for protocol v4 Exceptions, and MISC test cleanup --- tests/integration/long/test_failure_types.py | 279 +++++++++++++++++++ tests/integration/standard/test_query.py | 8 +- 2 files changed, 285 insertions(+), 2 deletions(-) create mode 100644 tests/integration/long/test_failure_types.py diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py new file mode 100644 index 0000000000..a927dc95f9 --- /dev/null +++ b/tests/integration/long/test_failure_types.py @@ -0,0 +1,279 @@ +# Copyright 2013-2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +try: + import unittest2 as unittest +except ImportError: + import unittest + + +from cassandra.cluster import Cluster +from cassandra import ConsistencyLevel +from cassandra import WriteFailure, ReadFailure, FunctionFailure +from cassandra.concurrent import execute_concurrent_with_args +from cassandra.query import SimpleStatement +from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace + + +def setup_module(): + """ + We need some custom setup for this module. All unit tests in this module + require protocol >=4. We won't bother going through the setup required unless that is the + protocol version we are using. + """ + + # If we aren't at protocol v 4 or greater don't waste time setting anything up, all tests will be skipped + if PROTOCOL_VERSION >= 4: + use_singledc(start=False) + ccm_cluster = get_cluster() + ccm_cluster.stop() + config_options = {'tombstone_failure_threshold': 2000, 'tombstone_warn_threshold': 1000} + ccm_cluster.set_configuration_options(config_options) + ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) + setup_keyspace() + + +def teardown_module(): + """ + The rest of the tests don't need custom tombstones + reset the config options so as to not mess with other tests. 
+ """ + if PROTOCOL_VERSION >= 4: + ccm_cluster = get_cluster() + config_options = {} + ccm_cluster.set_configuration_options(config_options) + if ccm_cluster is not None: + ccm_cluster.stop() + + +class ClientExceptionTests(unittest.TestCase): + + def setUp(self): + """ + Test is skipped if run with native protocol version <4 + """ + + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest( + "Native protocol 4,0+ is required for custom payloads, currently using %r" + % (PROTOCOL_VERSION,)) + + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + self.nodes_currently_failing = [] + self.node1, self.node2, self.node3 = get_cluster().nodes.values() + + def tearDown(self): + + self.cluster.shutdown() + failing_nodes = [] + + # Restart the nodes to fully functional again + self.setFailingNodes(failing_nodes, "testksfail") + + def setFailingNodes(self, failing_nodes, keyspace): + """ + This method will take in a set of failing nodes, and toggle all of the nodes in the provided list to fail + writes. 
+ @param failing_nodes A definitive list of nodes that should fail writes + @param keyspace The keyspace to enable failures on + + """ + + # Ensure all of the nodes on the list have failures enabled + for node in failing_nodes: + if node not in self.nodes_currently_failing: + node.stop(wait_other_notice=True, gently=False) + node.start(jvm_args=[" -Dcassandra.test.fail_writes_ks=" + keyspace], wait_for_binary_proto=True, + wait_other_notice=True) + self.nodes_currently_failing.append(node) + + # Ensure all nodes not on the list, but that are currently set to failing are enabled + for node in self.nodes_currently_failing: + if node not in failing_nodes: + node.stop(wait_other_notice=True, gently=False) + node.start(wait_for_binary_proto=True, wait_other_notice=True) + self.nodes_currently_failing.remove(node) + + def _perform_cql_statement(self, text, consistency_level, expected_exception): + """ + Simple helper method to preform cql statements and check for expected exception + @param text CQl statement to execute + @param consistency_level Consistency level at which it is to be executed + @param expected_exception Exception expected to be throw or none + """ + statement = SimpleStatement(text) + statement.consistency_level = consistency_level + + if expected_exception is None: + self.session.execute(statement) + else: + with self.assertRaises(expected_exception): + self.session.execute(statement) + + def test_write_failures_from_coordinator(self): + """ + Test to validate that write failures from the coordinator are surfaced appropriately. + + test_write_failures_from_coordinator Enable write failures on the various nodes using a custom jvm flag, + cassandra.test.fail_writes_ks. This will cause writes to fail on that specific node. Depending on the replication + factor of the keyspace, and the consistency level, we will expect the coordinator to send WriteFailure, or not. 
+ + + @since 2.6.0 + @jira_ticket PYTHON-238 + @expected_result Appropriate write failures from the coordinator + + @test_category queries:basic + """ + + # Setup temporary keyspace. + self._perform_cql_statement( + """ + CREATE KEYSPACE testksfail + WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'} + """, consistency_level=ConsistencyLevel.ALL, expected_exception=None) + + # create table + self._perform_cql_statement( + """ + CREATE TABLE testksfail.test ( + k int PRIMARY KEY, + v int ) + """, consistency_level=ConsistencyLevel.ALL, expected_exception=None) + + # Disable one node + failing_nodes = [self.node1] + self.setFailingNodes(failing_nodes, "testksfail") + + # With one node disabled we would expect a write failure with ConsistencyLevel of all + self._perform_cql_statement( + """ + INSERT INTO testksfail.test (k, v) VALUES (1, 0 ) + """, consistency_level=ConsistencyLevel.ALL, expected_exception=WriteFailure) + + # We have two nodes left so a write with consistency level of QUORUM should complete as expected + self._perform_cql_statement( + """ + INSERT INTO testksfail.test (k, v) VALUES (1, 0 ) + """, consistency_level=ConsistencyLevel.QUORUM, expected_exception=None) + + failing_nodes = [] + + # Restart the nodes to fully functional again + self.setFailingNodes(failing_nodes, "testksfail") + + # Drop temporary keyspace + self._perform_cql_statement( + """ + DROP KEYSPACE testksfail + """, consistency_level=ConsistencyLevel.ANY, expected_exception=None) + + def test_tombstone_overflow_read_failure(self): + """ + Test to validate that a ReadFailure is returned from the node when a specified threshold of tombstombs is + reached. + + test_tombstomb_overflow_read_failure First sets the tombstone failure threshold down to a level that allows it + to be more easily encountered. We then create some wide rows and ensure they are deleted appropriately. This + produces the correct amount of tombstombs. 
Upon making a simple query we expect to get a read failure back + from the coordinator. + + + @since 2.6.0 + @jira_ticket PYTHON-238 + @expected_result Appropriate read failure from the coordinator + + @test_category queries:basic + """ + + # Setup table for "wide row" + self._perform_cql_statement( + """ + CREATE TABLE test3rf.test2 ( + k int, + v0 int, + v1 int, PRIMARY KEY (k,v0)) + """, consistency_level=ConsistencyLevel.ALL, expected_exception=None) + + statement = self.session.prepare("INSERT INTO test3rf.test2 (k, v0,v1) VALUES (1,?,1)") + parameters = [(x,) for x in range(3000)] + execute_concurrent_with_args(self.session, statement, parameters, concurrency=50) + + statement = self.session.prepare("DELETE v1 FROM test3rf.test2 WHERE k = 1 AND v0 =?") + parameters = [(x,) for x in range(2001)] + execute_concurrent_with_args(self.session, statement, parameters, concurrency=50) + + self._perform_cql_statement( + """ + SELECT * FROM test3rf.test2 WHERE k = 1 + """, consistency_level=ConsistencyLevel.ALL, expected_exception=ReadFailure) + + self._perform_cql_statement( + """ + DROP TABLE test3rf.test2; + """, consistency_level=ConsistencyLevel.ALL, expected_exception=None) + + def test_user_function_failure(self): + """ + Test to validate that exceptions in user defined function are correctly surfaced by the driver to us. + + test_user_function_failure First creates a table to use for testing. Then creates a function that will throw an + exception when invoked. It then invokes the function and expects a FunctionFailure. Finally it performs + cleanup operations. 
+ + @since 2.6.0 + @jira_ticket PYTHON-238 + @expected_result Function failures when UDF throws exception + + @test_category queries:basic + """ + + # create UDF that throws an exception + self._perform_cql_statement( + """ + CREATE FUNCTION test3rf.test_failure(d double) + RETURNS NULL ON NULL INPUT + RETURNS double + LANGUAGE java AS 'throw new RuntimeException("failure");'; + """, consistency_level=ConsistencyLevel.ALL, expected_exception=None) + + # Create test table + self._perform_cql_statement( + """ + CREATE TABLE test3rf.d (k int PRIMARY KEY , d double); + """, consistency_level=ConsistencyLevel.ALL, expected_exception=None) + + # Insert some values + self._perform_cql_statement( + """ + INSERT INTO test3rf.d (k,d) VALUES (0, 5.12); + """, consistency_level=ConsistencyLevel.ALL, expected_exception=None) + + # Run the function expect a function failure exception + self._perform_cql_statement( + """ + SELECT test_failure(d) FROM test3rf.d WHERE k = 0; + """, consistency_level=ConsistencyLevel.ALL, expected_exception=FunctionFailure) + + self._perform_cql_statement( + """ + DROP FUNCTION test3rf.test_failure; + """, consistency_level=ConsistencyLevel.ALL, expected_exception=None) + + self._perform_cql_statement( + """ + DROP TABLE test3rf.d; + """, consistency_level=ConsistencyLevel.ALL, expected_exception=None) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 97de830b85..dc98505b87 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -28,6 +28,8 @@ from tests.integration import use_singledc, PROTOCOL_VERSION +import re + def setup_module(): use_singledc() @@ -136,15 +138,17 @@ def test_client_ip_in_trace(self): statement = SimpleStatement(query) response_future = session.execute_async(statement, trace=True) response_future.result(timeout=10.0) - current_host = response_future._current_host.address # Fetch the client_ip from the trace. 
trace = response_future.get_query_trace(max_wait=2.0) client_ip = trace.client + # Ip address should be in the local_host range + pat = re.compile("127.0.0.\d{1,3}") + # Ensure that ip is set self.assertIsNotNone(client_ip, "Client IP was not set in trace with C* >= 2.2") - self.assertEqual(client_ip, current_host, "Client IP from trace did not match the expected value") + self.assertTrue(pat.match(client_ip), "Client IP from trace did not match the expected value") cluster.shutdown() From 7aa8f3037538b786814ec870ce8282e7f3c534db Mon Sep 17 00:00:00 2001 From: Tom Lin Date: Mon, 25 May 2015 16:11:35 +0800 Subject: [PATCH 1455/3726] Check if max_attempts is None --- cassandra/policies.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index ce879abb5e..d4d8914c8b 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -509,14 +509,16 @@ def __init__(self, delay, max_attempts=64): """ if delay < 0: raise ValueError("delay must not be negative") - if max_attempts < 0: + if max_attempts is not None and max_attempts < 0: raise ValueError("max_attempts must not be negative") self.delay = delay self.max_attempts = max_attempts def new_schedule(self): - return repeat(self.delay, self.max_attempts) + if self.max_attempts: + return repeat(self.delay, self.max_attempts) + return repeat(self.delay) class ExponentialReconnectionPolicy(ReconnectionPolicy): From b43a947d4a7a88e395939b370bd616f396dee6a1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 19 May 2015 16:27:48 -0500 Subject: [PATCH 1456/3726] cqle: introduce 'date' and 'time' column types PYTHON-245 --- cassandra/cqlengine/columns.py | 63 +++++++++++++++--------- cassandra/cqltypes.py | 28 ++++++----- cassandra/util.py | 4 +- docs/api/cassandra/cqlengine/columns.rst | 8 ++- 4 files changed, 63 insertions(+), 40 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 15c05532fc..9edf0d6588 
100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -19,9 +19,8 @@ import six import warnings -from cassandra.cqltypes import DateType -from cassandra.encoder import cql_quote - +from cassandra import util +from cassandra.cqltypes import DateType, SimpleDateType from cassandra.cqlengine import ValidationError log = logging.getLogger(__name__) @@ -361,6 +360,10 @@ def to_database(self, value): class TinyInt(Integer): """ Stores an 8-bit signed integer value + + .. versionadded:: 2.6.0 + + requires C* 2.2+ and protocol v4+ """ db_type = 'tinyint' @@ -368,6 +371,10 @@ class TinyInt(Integer): class SmallInt(Integer): """ Stores a 16-bit signed integer value + + .. versionadded:: 2.6.0 + + requires C* 2.2+ and protocol v4+ """ db_type = 'smallint' @@ -466,35 +473,43 @@ def to_database(self, value): class Date(Column): """ - *Note: this type is overloaded, and will likely be changed or removed to accommodate distinct date type - in a future version* + Stores a simple date, with no time-of-day + + .. versionchanged:: 2.6.0 + + removed overload of Date and DateTime. 
DateTime is a drop-in replacement for legacy models - Stores a date value, with no time-of-day + requires C* 2.2+ and protocol v4+ """ - db_type = 'timestamp' + db_type = 'date' - def to_python(self, value): + def to_database(self, value): + value = super(Date, self).to_database(value) if value is None: return - if isinstance(value, datetime): - return value.date() - elif isinstance(value, date): - return value - try: - return datetime.utcfromtimestamp(value).date() - except TypeError: - return datetime.utcfromtimestamp(DateType.deserialize(value)).date() + + # need to translate to int version because some dates are not representable in + # string form (datetime limitation) + d = value if isinstance(value, util.Date) else util.Date(value) + return d.days_from_epoch + SimpleDateType.EPOCH_OFFSET_DAYS + + +class Time(Column): + """ + Stores a timezone-naive time-of-day, with nanosecond precision + + .. versionadded:: 2.6.0 + + requires C* 2.2+ and protocol v4+ + """ + db_type = 'time' def to_database(self, value): - value = super(Date, self).to_database(value) + value = super(Time, self).to_database(value) if value is None: return - if isinstance(value, datetime): - value = value.date() - if not isinstance(value, date): - raise ValidationError("{} '{}' is not a date object".format(self.column_name, repr(value))) - - return int((value - date(1970, 1, 1)).total_seconds() * 1000) + # str(util.Time) yields desired CQL encoding + return value if isinstance(value, util.Time) else util.Time(value) class UUID(Column): @@ -852,7 +867,7 @@ class UserDefinedType(Column): def __init__(self, user_type, **kwargs): """ - :param type user_type: specifies the :class:`~.UserType` model of the column + :param type user_type: specifies the :class:`~.cqlengine.usertype.UserType` model of the column """ self.user_type = user_type self.db_type = "frozen<%s>" % user_type.type_name() diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 6e9306c7cf..db0011344d 100644 --- 
a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -638,27 +638,29 @@ class SimpleDateType(_CassandraType): typename = 'date' date_format = "%Y-%m-%d" + # Values of the 'date'` type are encoded as 32-bit unsigned integers + # representing a number of days with epoch (January 1st, 1970) at the center of the + # range (2^31). + EPOCH_OFFSET_DAYS = 2 ** 31 + @classmethod def validate(cls, val): if not isinstance(val, util.Date): val = util.Date(val) return val + @staticmethod + def deserialize(byts, protocol_version): + days = uint32_unpack(byts) - SimpleDateType.EPOCH_OFFSET_DAYS + return util.Date(days) + @staticmethod def serialize(val, protocol_version): - # Values of the 'date'` type are encoded as 32-bit unsigned integers - # representing a number of days with epoch (January 1st, 1970) at the center of the - # range (2^31). try: days = val.days_from_epoch except AttributeError: days = util.Date(val).days_from_epoch - return uint32_pack(days + 2 ** 31) - - @staticmethod - def deserialize(byts, protocol_version): - days = uint32_unpack(byts) - 2 ** 31 - return util.Date(days) + return uint32_pack(days + SimpleDateType.EPOCH_OFFSET_DAYS) class ShortType(_CassandraType): @@ -682,6 +684,10 @@ def validate(cls, val): val = util.Time(val) return val + @staticmethod + def deserialize(byts, protocol_version): + return util.Time(int64_unpack(byts)) + @staticmethod def serialize(val, protocol_version): try: @@ -690,10 +696,6 @@ def serialize(val, protocol_version): nano = util.Time(val).nanosecond_time return int64_pack(nano) - @staticmethod - def deserialize(byts, protocol_version): - return util.Time(int64_unpack(byts)) - class UTF8Type(_CassandraType): typename = 'text' diff --git a/cassandra/util.py b/cassandra/util.py index 35e1e5e40f..dd4633596d 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -917,7 +917,7 @@ def __str__(self): class Date(object): ''' - Idealized naive date: year, month, day + Idealized date: year, month, day Offers wider year range 
than datetime.date. For Dates that cannot be represented as a datetime.date (because datetime.MINYEAR, datetime.MAXYEAR), this type falls back @@ -997,5 +997,5 @@ def __str__(self): dt = datetime_from_timestamp(self.seconds) return "%04d-%02d-%02d" % (dt.year, dt.month, dt.day) except: - # If we overflow datetime.[MIN|M + # If we overflow datetime.[MIN|MAX] return str(self.days_from_epoch) diff --git a/docs/api/cassandra/cqlengine/columns.rst b/docs/api/cassandra/cqlengine/columns.rst index a214d2cd94..aad164b606 100644 --- a/docs/api/cassandra/cqlengine/columns.rst +++ b/docs/api/cassandra/cqlengine/columns.rst @@ -52,7 +52,7 @@ Columns of all types are initialized by passing :class:`.Column` attributes to t .. autoclass:: Counter -.. autoclass:: Date(**kwargs) +.. autoclass:: Date .. autoclass:: DateTime(**kwargs) @@ -70,10 +70,16 @@ Columns of all types are initialized by passing :class:`.Column` attributes to t .. autoclass:: Set +.. autoclass:: SmallInt + .. autoclass:: Text +.. autoclass:: Time + .. autoclass:: TimeUUID(**kwargs) +.. autoclass:: TinyInt + .. autoclass:: UserDefinedType .. 
autoclass:: UUID(**kwargs) From f23cc3a46a3372b2f6984c5e96a6852c4fa60964 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 26 May 2015 10:57:24 -0500 Subject: [PATCH 1457/3726] Add Date type to CQL encoder --- cassandra/encoder.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/cassandra/encoder.py b/cassandra/encoder.py index d9c3e85212..2d2433333e 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -29,7 +29,7 @@ import six from cassandra.util import (OrderedDict, OrderedMap, OrderedMapSerializedKey, - sortedset, Time) + sortedset, Time, Date) if six.PY3: long = int @@ -76,6 +76,7 @@ def __init__(self): datetime.datetime: self.cql_encode_datetime, datetime.date: self.cql_encode_date, datetime.time: self.cql_encode_time, + Date: self.cql_encode_date_ext, Time: self.cql_encode_time, dict: self.cql_encode_map_collection, OrderedDict: self.cql_encode_map_collection, @@ -163,11 +164,18 @@ def cql_encode_date(self, val): def cql_encode_time(self, val): """ - Converts a :class:`datetime.date` object to a string with format + Converts a :class:`cassandra.util.Time` object to a string with format ``HH:MM:SS.mmmuuunnn``. """ return "'%s'" % val + def cql_encode_date_ext(self, val): + """ + Encodes a :class:`cassandra.util.Date` object as an integer + """ + # using the int form in case the Date exceeds datetime.[MIN|MAX]YEAR + return str(val.days_from_epoch + 2 ** 31) + def cql_encode_sequence(self, val): """ Converts a sequence to a string of the form ``(item1, item2, ...)``. 
This From d3626fb8cd50358969250d934662e40b45c6b2a7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 26 May 2015 10:58:05 -0500 Subject: [PATCH 1458/3726] cqle: tweak existing Date tests to expect Date type PYTHON-245 --- .../cqlengine/columns/test_validation.py | 21 +++++++++---------- .../cqlengine/columns/test_value_io.py | 12 +++++------ .../cqlengine/model/test_model_io.py | 9 ++++---- .../integration/cqlengine/model/test_udts.py | 3 ++- 4 files changed, 23 insertions(+), 22 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 8d4959b80d..94d0d156e4 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -18,10 +18,6 @@ from uuid import uuid4, uuid1 from cassandra import InvalidRequest -from cassandra.cqlengine.management import sync_table, drop_table -from cassandra.cqlengine.models import Model, ValidationError -from cassandra.cqlengine.connection import execute - from cassandra.cqlengine.columns import TimeUUID from cassandra.cqlengine.columns import Text from cassandra.cqlengine.columns import Integer @@ -33,6 +29,10 @@ from cassandra.cqlengine.columns import Boolean from cassandra.cqlengine.columns import Decimal from cassandra.cqlengine.columns import Inet +from cassandra.cqlengine.connection import execute +from cassandra.cqlengine.management import sync_table, drop_table +from cassandra.cqlengine.models import Model, ValidationError +from cassandra import util from tests.integration.cqlengine.base import BaseCassEngTestCase @@ -55,7 +55,7 @@ def tearDownClass(cls): def test_datetime_io(self): now = datetime.now() - dt = self.DatetimeTest.objects.create(test_id=0, created_at=now) + self.DatetimeTest.objects.create(test_id=0, created_at=now) dt2 = self.DatetimeTest.objects(test_id=0).first() assert dt2.created_at.timetuple()[:6] == now.timetuple()[:6] @@ -164,16 +164,15 @@ def 
tearDownClass(cls): def test_date_io(self): today = date.today() self.DateTest.objects.create(test_id=0, created_at=today) - dt2 = self.DateTest.objects(test_id=0).first() - assert dt2.created_at.isoformat() == today.isoformat() + result = self.DateTest.objects(test_id=0).first() + self.assertEqual(result.created_at, util.Date(today)) def test_date_io_using_datetime(self): now = datetime.utcnow() self.DateTest.objects.create(test_id=0, created_at=now) - dt2 = self.DateTest.objects(test_id=0).first() - assert not isinstance(dt2.created_at, datetime) - assert isinstance(dt2.created_at, date) - assert dt2.created_at.isoformat() == now.date().isoformat() + result = self.DateTest.objects(test_id=0).first() + self.assertIsInstance(result.created_at, util.Date) + self.assertEqual(result.created_at, util.Date(now)) def test_date_none(self): self.DateTest.objects.create(test_id=1, created_at=None) diff --git a/tests/integration/cqlengine/columns/test_value_io.py b/tests/integration/cqlengine/columns/test_value_io.py index 17f96e2242..9b5947dff3 100644 --- a/tests/integration/cqlengine/columns/test_value_io.py +++ b/tests/integration/cqlengine/columns/test_value_io.py @@ -17,13 +17,13 @@ from uuid import uuid1, uuid4, UUID import six -from tests.integration.cqlengine.base import BaseCassEngTestCase - +from cassandra.cqlengine import columns from cassandra.cqlengine.management import sync_table from cassandra.cqlengine.management import drop_table from cassandra.cqlengine.models import Model -from cassandra.cqlengine import columns -import unittest +from cassandra import util + +from tests.integration.cqlengine.base import BaseCassEngTestCase class BaseColumnIOTest(BaseCassEngTestCase): @@ -151,9 +151,9 @@ class TestDate(BaseColumnIOTest): column = columns.Date - now = datetime.now().date() + now = util.Date(datetime.now().date()) pkey_val = now - data_val = now + timedelta(days=1) + data_val = util.Date(now.days_from_epoch + 1) class TestUUID(BaseColumnIOTest): diff --git 
a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 1af9a8af0f..afee65d698 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -17,14 +17,15 @@ from datetime import date, datetime from decimal import Decimal from operator import itemgetter -from cassandra.cqlengine import CQLEngineException -from tests.integration.cqlengine.base import BaseCassEngTestCase +from cassandra.cqlengine import columns +from cassandra.cqlengine import CQLEngineException from cassandra.cqlengine.management import sync_table from cassandra.cqlengine.management import drop_table from cassandra.cqlengine.models import Model -from cassandra.cqlengine import columns +from cassandra.util import Date +from tests.integration.cqlengine.base import BaseCassEngTestCase class TestModel(Model): @@ -161,7 +162,7 @@ class AllDatatypesModel(Model): sync_table(AllDatatypesModel) - input = ['ascii', 2 ** 63 - 1, bytearray(b'hello world'), True, date(1970, 1, 1), + input = ['ascii', 2 ** 63 - 1, bytearray(b'hello world'), True, Date(date(1970, 1, 1)), datetime.utcfromtimestamp(872835240), Decimal('12.3E+7'), 2.39, 3.4028234663852886e+38, '123.123.123.123', 2147483647, 'text', UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 3299fda310..af2abaa0e5 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -21,6 +21,7 @@ from cassandra.cqlengine.usertype import UserType from cassandra.cqlengine import columns from cassandra.cqlengine.management import sync_table, sync_type, create_keyspace_simple, drop_keyspace +from cassandra.util import Date from tests.integration import get_server_versions from tests.integration.cqlengine.base import BaseCassEngTestCase @@ -273,7 
+274,7 @@ class AllDatatypesModel(Model): sync_table(AllDatatypesModel) - input = AllDatatypes(a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True, e=date(1970, 1, 1), + input = AllDatatypes(a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True, e=Date(date(1970, 1, 1)), f=datetime.utcfromtimestamp(872835240), g=Decimal('12.3E+7'), h=2.39, i=3.4028234663852886e+38, j='123.123.123.123', k=2147483647, l='text', m=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), n=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), From 0d2a8b4727eef3f863b8273a1f37c111b5898133 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 26 May 2015 12:10:28 -0500 Subject: [PATCH 1459/3726] Test infinite max attempts for ConstantReconnectionPolicy PYTHON-325 --- tests/unit/test_policies.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 3e24f71fac..4f865fb1ee 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -799,6 +799,14 @@ def test_schedule_negative_max_attempts(self): except ValueError: pass + def test_schedule_infinite_attempts(self): + delay = 2 + max_attempts = None + crp = ConstantReconnectionPolicy(delay=delay, max_attempts=max_attempts) + # this is infinite. 
we'll just verify one more than default + for _, d in zip(range(65), crp.new_schedule()): + self.assertEqual(d, delay) + class ExponentialReconnectionPolicyTest(unittest.TestCase): From 3cd9d50b1637f48ca8c10657e38fded3dd72f76b Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Tue, 26 May 2015 10:32:46 -0700 Subject: [PATCH 1460/3726] [PYTHON-160] Test for default loadbalancing policy --- .../long/test_loadbalancingpolicies.py | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index cf5f3a191b..7b2b87957d 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -17,6 +17,7 @@ from cassandra import ConsistencyLevel, Unavailable, OperationTimedOut, ReadTimeout from cassandra.cluster import Cluster, NoHostAvailable from cassandra.concurrent import execute_concurrent_with_args +from cassandra.metadata import murmur3 from cassandra.policies import (RoundRobinPolicy, DCAwareRoundRobinPolicy, TokenAwarePolicy, WhiteListRoundRobinPolicy) from cassandra.query import SimpleStatement @@ -84,6 +85,30 @@ def _query(self, session, keyspace, count=12, log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb + def test_token_aware_is_used_by_default(self): + """ + Test for default loadbalancing policy + + test_token_aware_is_used_by_default tests that the default loadbalancing policy is policies.TokenAwarePolicy. + It creates a simple Cluster and verifies that the default loadbalancing policy is TokenAwarePolicy if the + murmur3 C extension is found. Otherwise, the default loadbalancing policy is DCAwareRoundRobinPolicy. + + @since 2.6.0 + @jira_ticket PYTHON-160 + @expected_result TokenAwarePolicy should be the default loadbalancing policy. 
+ + @test_category load_balancing:token_aware + """ + + cluster = Cluster(protocol_version=PROTOCOL_VERSION) + + if murmur3 is not None: + self.assertTrue(isinstance(cluster.load_balancing_policy, TokenAwarePolicy)) + else: + self.assertTrue(isinstance(cluster.load_balancing_policy, DCAwareRoundRobinPolicy)) + + cluster.shutdown() + def test_roundrobin(self): use_singledc() keyspace = 'test_roundrobin' From 330349ae572a7c8f06f27b992e1d53c27293c6fe Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 26 May 2015 16:16:19 -0500 Subject: [PATCH 1461/3726] Update type tests --- cassandra/util.py | 12 ++ tests/integration/datatype_utils.py | 31 +++--- tests/integration/standard/test_types.py | 135 ++++++++++------------- 3 files changed, 82 insertions(+), 96 deletions(-) diff --git a/cassandra/util.py b/cassandra/util.py index dd4633596d..16457df589 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -896,6 +896,9 @@ def _from_time(self, t): t.second * Time.SECOND + t.microsecond * Time.MICRO) + def __hash__(self): + return self.nanosecond_time + def __eq__(self, other): if isinstance(other, Time): return self.nanosecond_time == other.nanosecond_time @@ -907,6 +910,9 @@ def __eq__(self, other): datetime.time(hour=self.hour, minute=self.minute, second=self.second, microsecond=self.nanosecond // Time.MICRO) == other + def __lt__(self, other): + return self.nanosecond_time < other.nanosecond_time + def __repr__(self): return "Time(%s)" % self.nanosecond_time @@ -977,6 +983,9 @@ def _from_datestring(self, s): dt = datetime.datetime.strptime(s, self.date_format) self._from_timetuple(dt.timetuple()) + def __hash__(self): + return self.days_from_epoch + def __eq__(self, other): if isinstance(other, Date): return self.days_from_epoch == other.days_from_epoch @@ -989,6 +998,9 @@ def __eq__(self, other): except Exception: return False + def __lt__(self, other): + return self.days_from_epoch < other.days_from_epoch + def __repr__(self): return "Date(%s)" % 
self.days_from_epoch diff --git a/tests/integration/datatype_utils.py b/tests/integration/datatype_utils.py index a7d8aeb1f8..80087b67f1 100644 --- a/tests/integration/datatype_utils.py +++ b/tests/integration/datatype_utils.py @@ -16,17 +16,12 @@ from datetime import datetime, date, time from uuid import uuid1, uuid4 -try: - from blist import sortedset -except ImportError: - sortedset = set # noqa - -from cassandra.util import OrderedMap +from cassandra.util import OrderedMap, Date, Time, sortedset from tests.integration import get_server_versions -PRIMITIVE_DATATYPES = [ +PRIMITIVE_DATATYPES = sortedset([ 'ascii', 'bigint', 'blob', @@ -42,26 +37,26 @@ 'uuid', 'varchar', 'varint', -] +]) -COLLECTION_TYPES = [ +COLLECTION_TYPES = sortedset([ 'list', 'set', 'map', -] +]) def update_datatypes(): _cass_version, _cql_version = get_server_versions() if _cass_version >= (2, 1, 0): - COLLECTION_TYPES.append('tuple') + COLLECTION_TYPES.add('tuple') + + if _cass_version >= (2, 2, 0): + PRIMITIVE_DATATYPES.update(['date', 'time', 'smallint', 'tinyint']) - if _cass_version >= (3, 0, 0): - PRIMITIVE_DATATYPES.append('date') - PRIMITIVE_DATATYPES.append('time') - PRIMITIVE_DATATYPES.append('tinyint') - PRIMITIVE_DATATYPES.append('smallint') + global SAMPLE_DATA + SAMPLE_DATA = get_sample_data() def get_sample_data(): @@ -114,10 +109,10 @@ def get_sample_data(): sample_data[datatype] = int(str(2147483647) + '000') elif datatype == 'date': - sample_data[datatype] = date(2015, 1, 15) + sample_data[datatype] = Date(date(2015, 1, 15)) elif datatype == 'time': - sample_data[datatype] = time(16, 47, 25, 7) + sample_data[datatype] = Time(time(16, 47, 25, 7)) elif datatype == 'tinyint': sample_data[datatype] = 123 diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index c4821d9c87..ba2a9738a8 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -41,31 +41,28 @@ def setup_module(): class 
TypeTests(unittest.TestCase): - def setUp(self): - self._cass_version, self._cql_version = get_server_versions() - - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - self.session = self.cluster.connect() - self.session.execute("CREATE KEYSPACE typetests WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}") - self.cluster.shutdown() - - def tearDown(self): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) - self.session = self.cluster.connect() - self.session.execute("DROP KEYSPACE typetests") - self.cluster.shutdown() + @classmethod + def setUpClass(cls): + cls._cass_version, cls._cql_version = get_server_versions() + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.session = cls.cluster.connect() + cls.session.execute("CREATE KEYSPACE typetests WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}") + cls.session.set_keyspace("typetests") + + @classmethod + def tearDownClass(cls): + cls.session.execute("DROP KEYSPACE typetests") + cls.cluster.shutdown() def test_can_insert_blob_type_as_string(self): """ Tests that byte strings in Python maps to blob type in Cassandra """ - - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("typetests") + s = self.session s.execute("CREATE TABLE blobstring (a ascii PRIMARY KEY, b blob)") - params = ['key1', b'blobyblob'] + params = ['key1', b'blobbyblob'] query = "INSERT INTO blobstring (a, b) VALUES (%s, %s)" # In python2, with Cassandra > 2.0, we don't treat the 'byte str' type as a blob, so we'll encode it @@ -98,9 +95,7 @@ def test_can_insert_blob_type_as_bytearray(self): """ Tests that blob type in Cassandra maps to bytearray in Python """ - - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("typetests") + s = self.session s.execute("CREATE TABLE blobbytes (a ascii PRIMARY KEY, b blob)") @@ -111,13 +106,10 @@ def test_can_insert_blob_type_as_bytearray(self): for expected, actual in zip(params, results): 
self.assertEqual(expected, actual) - c.shutdown() - def test_can_insert_primitive_datatypes(self): """ Test insertion of all datatype primitives """ - c = Cluster(protocol_version=PROTOCOL_VERSION) s = c.connect("typetests") @@ -254,54 +246,62 @@ def test_can_insert_empty_strings_and_nulls(self): """ Test insertion of empty strings and null values """ - - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("typetests") + s = self.session # create table alpha_type_list = ["zz int PRIMARY KEY"] col_names = [] + string_types = set(('ascii', 'text', 'varchar')) + string_columns = set(('')) + # this is just a list of types to try with empty strings + non_string_types = PRIMITIVE_DATATYPES - string_types - set(('blob', 'date', 'inet', 'time', 'timestamp')) + non_string_columns = set() start_index = ord('a') for i, datatype in enumerate(PRIMITIVE_DATATYPES): - alpha_type_list.append("{0} {1}".format(chr(start_index + i), datatype)) - col_names.append(chr(start_index + i)) + col_name = chr(start_index + i) + alpha_type_list.append("{0} {1}".format(col_name, datatype)) + col_names.append(col_name) + if datatype in non_string_types: + non_string_columns.add(col_name) + if datatype in string_types: + string_columns.add(col_name) - s.execute("CREATE TABLE alltypes ({0})".format(', '.join(alpha_type_list))) + s.execute("CREATE TABLE all_empty ({0})".format(', '.join(alpha_type_list))) # verify all types initially null with simple statement columns_string = ','.join(col_names) - s.execute("INSERT INTO alltypes (zz) VALUES (2)") - results = s.execute("SELECT {0} FROM alltypes WHERE zz=2".format(columns_string))[0] + s.execute("INSERT INTO all_empty (zz) VALUES (2)") + results = s.execute("SELECT {0} FROM all_empty WHERE zz=2".format(columns_string))[0] self.assertTrue(all(x is None for x in results)) # verify all types initially null with prepared statement - select = s.prepare("SELECT {0} FROM alltypes WHERE zz=?".format(columns_string)) + select = s.prepare("SELECT 
{0} FROM all_empty WHERE zz=?".format(columns_string)) results = s.execute(select.bind([2]))[0] self.assertTrue(all(x is None for x in results)) # insert empty strings for string-like fields - expected_values = {'j': '', 'a': '', 'n': ''} - columns_string = ','.join(expected_values.keys()) - placeholders = ','.join(["%s"] * len(expected_values)) - s.execute("INSERT INTO alltypes (zz, {0}) VALUES (3, {1})".format(columns_string, placeholders), expected_values.values()) + expected_values = dict((col, '') for col in string_columns) + columns_string = ','.join(string_columns) + placeholders = ','.join(["%s"] * len(string_columns)) + s.execute("INSERT INTO all_empty (zz, {0}) VALUES (3, {1})".format(columns_string, placeholders), expected_values.values()) # verify string types empty with simple statement - results = s.execute("SELECT {0} FROM alltypes WHERE zz=3".format(columns_string))[0] + results = s.execute("SELECT {0} FROM all_empty WHERE zz=3".format(columns_string))[0] for expected, actual in zip(expected_values.values(), results): self.assertEqual(actual, expected) # verify string types empty with prepared statement - results = s.execute(s.prepare("SELECT {0} FROM alltypes WHERE zz=?".format(columns_string)), [3])[0] + results = s.execute(s.prepare("SELECT {0} FROM all_empty WHERE zz=?".format(columns_string)), [3])[0] for expected, actual in zip(expected_values.values(), results): self.assertEqual(actual, expected) # non-string types shouldn't accept empty strings - for col in ('b', 'd', 'e', 'f', 'g', 'i', 'l', 'm', 'o'): - query = "INSERT INTO alltypes (zz, {0}) VALUES (4, %s)".format(col) + for col in non_string_columns: + query = "INSERT INTO all_empty (zz, {0}) VALUES (4, %s)".format(col) with self.assertRaises(InvalidRequest): s.execute(query, ['']) - insert = s.prepare("INSERT INTO alltypes (zz, {0}) VALUES (4, ?)".format(col)) + insert = s.prepare("INSERT INTO all_empty (zz, {0}) VALUES (4, ?)".format(col)) with self.assertRaises(TypeError): 
s.execute(insert, ['']) @@ -314,7 +314,7 @@ def test_can_insert_empty_strings_and_nulls(self): # insert the data columns_string = ','.join(col_names) placeholders = ','.join(["%s"] * len(col_names)) - simple_insert = "INSERT INTO alltypes (zz, {0}) VALUES (5, {1})".format(columns_string, placeholders) + simple_insert = "INSERT INTO all_empty (zz, {0}) VALUES (5, {1})".format(columns_string, placeholders) s.execute(simple_insert, params) # then insert None, which should null them out @@ -322,13 +322,13 @@ def test_can_insert_empty_strings_and_nulls(self): s.execute(simple_insert, null_values) # check via simple statement - query = "SELECT {0} FROM alltypes WHERE zz=5".format(columns_string) + query = "SELECT {0} FROM all_empty WHERE zz=5".format(columns_string) results = s.execute(query)[0] for col in results: self.assertEqual(None, col) # check via prepared statement - select = s.prepare("SELECT {0} FROM alltypes WHERE zz=?".format(columns_string)) + select = s.prepare("SELECT {0} FROM all_empty WHERE zz=?".format(columns_string)) results = s.execute(select.bind([5]))[0] for col in results: self.assertEqual(None, col) @@ -337,7 +337,7 @@ def test_can_insert_empty_strings_and_nulls(self): s.execute(simple_insert, params) placeholders = ','.join(["?"] * len(col_names)) - insert = s.prepare("INSERT INTO alltypes (zz, {0}) VALUES (5, {1})".format(columns_string, placeholders)) + insert = s.prepare("INSERT INTO all_empty (zz, {0}) VALUES (5, {1})".format(columns_string, placeholders)) s.execute(insert, null_values) results = s.execute(query)[0] @@ -348,15 +348,11 @@ def test_can_insert_empty_strings_and_nulls(self): for col in results: self.assertEqual(None, col) - s.shutdown() - def test_can_insert_empty_values_for_int32(self): """ Ensure Int32Type supports empty values """ - - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("typetests") + s = self.session s.execute("CREATE TABLE empty_values (a text PRIMARY KEY, b int)") s.execute("INSERT INTO 
empty_values (a, b) VALUES ('a', blobAsInt(0x))") @@ -367,8 +363,6 @@ def test_can_insert_empty_values_for_int32(self): finally: Int32Type.support_empty_values = False - c.shutdown() - def test_timezone_aware_datetimes_are_timestamps(self): """ Ensure timezone-aware datetimes are converted to timestamps correctly @@ -383,8 +377,7 @@ def test_timezone_aware_datetimes_are_timestamps(self): eastern_tz = pytz.timezone('US/Eastern') eastern_tz.localize(dt) - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("typetests") + s = self.session s.execute("CREATE TABLE tz_aware (a ascii PRIMARY KEY, b timestamp)") @@ -399,8 +392,6 @@ def test_timezone_aware_datetimes_are_timestamps(self): result = s.execute("SELECT b FROM tz_aware WHERE a='key2'")[0].b self.assertEqual(dt.utctimetuple(), result.utctimetuple()) - c.shutdown() - def test_can_insert_tuples(self): """ Basic test of tuple functionality @@ -508,17 +499,16 @@ def test_can_insert_tuples_all_primitive_datatypes(self): "k int PRIMARY KEY, " "v frozen>)" % ','.join(PRIMITIVE_DATATYPES)) - for i in range(len(PRIMITIVE_DATATYPES)): + values = [] + type_count = len(PRIMITIVE_DATATYPES) + for i, data_type in enumerate(PRIMITIVE_DATATYPES): # create tuples to be written and ensure they match with the expected response # responses have trailing None values for every element that has not been written - created_tuple = [get_sample(PRIMITIVE_DATATYPES[j]) for j in range(i + 1)] - response_tuple = tuple(created_tuple + [None for j in range(len(PRIMITIVE_DATATYPES) - i - 1)]) - written_tuple = tuple(created_tuple) - - s.execute("INSERT INTO tuple_primitive (k, v) VALUES (%s, %s)", (i, written_tuple)) - + values.append(get_sample(data_type)) + expected = tuple(values + [None] * (type_count - len(values))) + s.execute("INSERT INTO tuple_primitive (k, v) VALUES (%s, %s)", (i, tuple(values))) result = s.execute("SELECT v FROM tuple_primitive WHERE k=%s", (i,))[0] - self.assertEqual(response_tuple, result.v) + 
self.assertEqual(result.v, expected) c.shutdown() def test_can_insert_tuples_all_collection_datatypes(self): @@ -668,8 +658,7 @@ def test_can_insert_tuples_with_nulls(self): if self._cass_version < (2, 1, 0): raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1") - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("typetests") + s = self.session s.execute("CREATE TABLE tuples_nulls (k int PRIMARY KEY, t frozen>)") @@ -688,28 +677,20 @@ def test_can_insert_tuples_with_nulls(self): self.assertEqual(('', None, None, b''), result[0].t) self.assertEqual(('', None, None, b''), s.execute(read)[0].t) - c.shutdown() - def test_can_insert_unicode_query_string(self): """ Test to ensure unicode strings can be used in a query """ - - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("typetests") + s = self.session query = u"SELECT * FROM system.schema_columnfamilies WHERE keyspace_name = 'ef\u2052ef' AND columnfamily_name = %s" s.execute(query, (u"fe\u2051fe",)) - c.shutdown() - def test_can_read_composite_type(self): """ Test to ensure that CompositeTypes can be used in a query """ - - c = Cluster(protocol_version=PROTOCOL_VERSION) - s = c.connect("typetests") + s = self.session s.execute(""" CREATE TABLE composites ( @@ -728,5 +709,3 @@ def test_can_read_composite_type(self): result = s.execute("SELECT * FROM composites WHERE a = 0")[0] self.assertEqual(0, result.a) self.assertEqual(('abc',), result.b) - - c.shutdown() From abdbf454190f86baeaec72cfe0753db779ac3dc2 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 26 May 2015 16:28:01 -0500 Subject: [PATCH 1462/3726] UNSET Prepared test updates peer review input --- .../standard/test_prepared_statements.py | 49 ++++++++++--------- tests/unit/test_parameter_binding.py | 2 - 2 files changed, 26 insertions(+), 25 deletions(-) diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 8c50d7b747..42c3894e83 
100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -28,6 +28,7 @@ def setup_module(): use_singledc() + class PreparedStatementTests(unittest.TestCase): def test_basic(self): @@ -65,9 +66,9 @@ def test_basic(self): session.execute(bound) prepared = session.prepare( - """ - SELECT * FROM cf0 WHERE a=? - """) + """ + SELECT * FROM cf0 WHERE a=? + """) self.assertIsInstance(prepared, PreparedStatement) bound = prepared.bind(('a')) @@ -90,9 +91,9 @@ def test_basic(self): session.execute(bound) prepared = session.prepare( - """ - SELECT * FROM cf0 WHERE a=? - """) + """ + SELECT * FROM cf0 WHERE a=? + """) self.assertIsInstance(prepared, PreparedStatement) @@ -216,9 +217,9 @@ def test_none_values(self): session.execute(bound) prepared = session.prepare( - """ - SELECT * FROM test3rf.test WHERE k=? - """) + """ + SELECT * FROM test3rf.test WHERE k=? + """) self.assertIsInstance(prepared, PreparedStatement) bound = prepared.bind((1,)) @@ -249,8 +250,8 @@ def test_unset_values(self): # table with at least two values so one can be used as a marker session.execute("CREATE TABLE IF NOT EXISTS test1rf.test_unset_values (k int PRIMARY KEY, v0 int, v1 int)") - insert = session.prepare( "INSERT INTO test1rf.test_unset_values (k, v0, v1) VALUES (?, ?, ?)") - select = session.prepare( "SELECT * FROM test1rf.test_unset_values WHERE k=?") + insert = session.prepare("INSERT INTO test1rf.test_unset_values (k, v0, v1) VALUES (?, ?, ?)") + select = session.prepare("SELECT * FROM test1rf.test_unset_values WHERE k=?") bind_expected = [ # initial condition @@ -273,6 +274,8 @@ def test_unset_values(self): results = session.execute(select, (0,)) self.assertEqual(results[0], expected) + self.assertRaises(ValueError, session.execute, select, (UNSET_VALUE, 0, 0)) + cluster.shutdown() def test_no_meta(self): @@ -289,9 +292,9 @@ def test_no_meta(self): session.execute(bound) prepared = session.prepare( - """ - SELECT * 
FROM test3rf.test WHERE k=0 - """) + """ + SELECT * FROM test3rf.test WHERE k=0 + """) self.assertIsInstance(prepared, PreparedStatement) bound = prepared.bind(None) @@ -319,9 +322,9 @@ def test_none_values_dicts(self): session.execute(bound) prepared = session.prepare( - """ - SELECT * FROM test3rf.test WHERE k=? - """) + """ + SELECT * FROM test3rf.test WHERE k=? + """) self.assertIsInstance(prepared, PreparedStatement) bound = prepared.bind({'k': 1}) @@ -348,9 +351,9 @@ def test_async_binding(self): future.result() prepared = session.prepare( - """ - SELECT * FROM test3rf.test WHERE k=? - """) + """ + SELECT * FROM test3rf.test WHERE k=? + """) self.assertIsInstance(prepared, PreparedStatement) future = session.execute_async(prepared, (873,)) @@ -377,9 +380,9 @@ def test_async_binding_dicts(self): future.result() prepared = session.prepare( - """ - SELECT * FROM test3rf.test WHERE k=? - """) + """ + SELECT * FROM test3rf.test WHERE k=? + """) self.assertIsInstance(prepared, PreparedStatement) future = session.execute_async(prepared, {'k': 873}) diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index 6b84260279..a53a9f6dfb 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -146,8 +146,6 @@ def test_dict_missing_routing_key(self): def test_missing_value(self): self.assertRaises(KeyError, self.bound.bind, {'rk0': 0, 'rk1': 0, 'ck0': 0}) - # legacy behavior: short parameters are allowed through, errored by server - #self.assertRaises(ValueError, self.bound.bind, (0, 0, 0)) def test_dict_extra_value(self): self.assertRaises(ValueError, self.bound.bind, {'rk0': 0, 'rk1': 0, 'ck0': 0, 'v0': 0, 'should_not_be_here': 123}) From 05b60071fb713b5229015ec7c640207f2156bbf9 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 26 May 2015 13:59:29 -0500 Subject: [PATCH 1463/3726] [PYTHON-311] Test for inserting collections of UDTs --- .../integration/cqlengine/model/test_udts.py | 42 
++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index af2abaa0e5..0d94d5d096 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -15,7 +15,7 @@ from datetime import date, datetime from decimal import Decimal import unittest -from uuid import UUID +from uuid import UUID, uuid4 from cassandra.cqlengine.models import Model from cassandra.cqlengine.usertype import UserType @@ -286,3 +286,43 @@ class AllDatatypesModel(Model): for i in range(ord('a'), ord('a') + 15): self.assertEqual(input[chr(i)], output[chr(i)]) + + def test_nested_udts_inserts(self): + """ + Test for inserting collections of user types using cql engine. + + test_nested_udts_inserts Constructs a model that contains a list of usertypes. It will then attempt to insert + them. The expectation is that no exception is thrown during insert. For sanity sake we also validate that our + input and output values match. This combination of model, and UT produces a syntax error in 2.5.1 due to + improper quoting around the names collection. 
+ + @since 2.6.0 + @jira_ticket PYTHON-311 + @expected_result No syntax exception thrown + + @test_category data_types:udt + """ + + class Name(UserType): + type_name__ = "header" + + name = columns.Text() + value = columns.Text() + + class Container(Model): + id = columns.UUID(primary_key=True, default=uuid4) + names = columns.List(columns.UserDefinedType(Name())) + + # Construct the objects and insert them + names = [] + for i in range(0, 10): + names.append(Name(name="name{0}".format(i), value="value{0}".format(i))) + + # Create table, insert data + sync_table(Container) + Container.create(id=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), names=names) + + # Validate input and output matches + self.assertEqual(1, Container.objects.count()) + names_output = Container.objects().first().names + self.assertEqual(names_output, names) From dca25dac784bb6657422e959392b4ba5765d522e Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Tue, 26 May 2015 15:49:22 -0700 Subject: [PATCH 1464/3726] [PYTHON-245] Tests for Date, Time, SmallInt, TinyInt in cqlengine --- .../cqlengine/model/test_model_io.py | 26 ++++++++------ .../integration/cqlengine/model/test_udts.py | 36 +++++++++++-------- 2 files changed, 37 insertions(+), 25 deletions(-) diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index afee65d698..1c274353ac 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -14,7 +14,7 @@ from uuid import uuid4, UUID import random -from datetime import date, datetime +from datetime import datetime, date, time from decimal import Decimal from operator import itemgetter @@ -23,7 +23,7 @@ from cassandra.cqlengine.management import sync_table from cassandra.cqlengine.management import drop_table from cassandra.cqlengine.models import Model -from cassandra.util import Date +from cassandra.util import Date, Time from tests.integration.cqlengine.base 
import BaseCassEngTestCase @@ -155,24 +155,28 @@ class AllDatatypesModel(Model): i = columns.Float(double_precision=False) j = columns.Inet() k = columns.Integer() - l = columns.Text() - m = columns.TimeUUID() - n = columns.UUID() - o = columns.VarInt() + l = columns.SmallInt() + m = columns.Text() + n = columns.Time() + o = columns.TimeUUID() + p = columns.TinyInt() + q = columns.UUID() + r = columns.VarInt() sync_table(AllDatatypesModel) input = ['ascii', 2 ** 63 - 1, bytearray(b'hello world'), True, Date(date(1970, 1, 1)), datetime.utcfromtimestamp(872835240), Decimal('12.3E+7'), 2.39, - 3.4028234663852886e+38, '123.123.123.123', 2147483647, 'text', - UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), + 3.4028234663852886e+38, '123.123.123.123', 2147483647, 32523, 'text', Time(time(16, 47, 25, 7)), + UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), 123, UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), int(str(2147483647) + '000')] AllDatatypesModel.create(id=0, a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True, e=date(1970, 1, 1), f=datetime.utcfromtimestamp(872835240), g=Decimal('12.3E+7'), h=2.39, - i=3.4028234663852886e+38, j='123.123.123.123', k=2147483647, l='text', - m=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), n=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), - o=int(str(2147483647) + '000')) + i=3.4028234663852886e+38, j='123.123.123.123', k=2147483647, l=32523, m='text', + n=time(16, 47, 25, 7), o=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), + p=123, q=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), + r=int(str(2147483647) + '000')) self.assertEqual(1, AllDatatypesModel.objects.count()) output = AllDatatypesModel.objects().first() diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index af2abaa0e5..84220cd036 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -12,7 +12,7 @@ # See the 
License for the specific language governing permissions and # limitations under the License. -from datetime import date, datetime +from datetime import datetime, date, time from decimal import Decimal import unittest from uuid import UUID @@ -21,7 +21,7 @@ from cassandra.cqlengine.usertype import UserType from cassandra.cqlengine import columns from cassandra.cqlengine.management import sync_table, sync_type, create_keyspace_simple, drop_keyspace -from cassandra.util import Date +from cassandra.util import Date, Time from tests.integration import get_server_versions from tests.integration.cqlengine.base import BaseCassEngTestCase @@ -216,10 +216,13 @@ class AllDatatypes(UserType): i = columns.Float(double_precision=False) j = columns.Inet() k = columns.Integer() - l = columns.Text() - m = columns.TimeUUID() - n = columns.UUID() - o = columns.VarInt() + l = columns.SmallInt() + m = columns.Text() + n = columns.Time() + o = columns.TimeUUID() + p = columns.TinyInt() + q = columns.UUID() + r = columns.VarInt() class AllDatatypesModel(Model): id = columns.Integer(primary_key=True) @@ -227,7 +230,8 @@ class AllDatatypesModel(Model): sync_table(AllDatatypesModel) - input = AllDatatypes(a=None, b=None, c=None, d=None, e=None, f=None, g=None, h=None, i=None, j=None, k=None, l=None, m=None, n=None) + input = AllDatatypes(a=None, b=None, c=None, d=None, e=None, f=None, g=None, h=None, i=None, j=None, k=None, + l=None, m=None, n=None, o=None, p=None, q=None, r=None) AllDatatypesModel.create(id=0, data=input) self.assertEqual(1, AllDatatypesModel.objects.count()) @@ -263,10 +267,13 @@ class AllDatatypes(UserType): i = columns.Float(double_precision=False) j = columns.Inet() k = columns.Integer() - l = columns.Text() - m = columns.TimeUUID() - n = columns.UUID() - o = columns.VarInt() + l = columns.SmallInt() + m = columns.Text() + n = columns.Time() + o = columns.TimeUUID() + p = columns.TinyInt() + q = columns.UUID() + r = columns.VarInt() class AllDatatypesModel(Model): id = 
columns.Integer(primary_key=True) @@ -276,9 +283,10 @@ class AllDatatypesModel(Model): input = AllDatatypes(a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True, e=Date(date(1970, 1, 1)), f=datetime.utcfromtimestamp(872835240), g=Decimal('12.3E+7'), h=2.39, - i=3.4028234663852886e+38, j='123.123.123.123', k=2147483647, l='text', - m=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), n=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), - o=int(str(2147483647) + '000')) + i=3.4028234663852886e+38, j='123.123.123.123', k=2147483647, l=32523, m='text', + n=Time(time(16, 47, 25, 7)), o=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), + p=123, q=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), + r=int(str(2147483647) + '000')) AllDatatypesModel.create(id=0, data=input) self.assertEqual(1, AllDatatypesModel.objects.count()) From c0b8c1351aefc254893ed69e92906bf319c43c59 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Tue, 26 May 2015 20:35:40 -0700 Subject: [PATCH 1465/3726] Fix failing cqlengine tests related to protocol v4 datatypes --- .../cqlengine/columns/test_validation.py | 6 +- .../cqlengine/columns/test_value_io.py | 63 +++++++++- .../cqlengine/model/test_model_io.py | 87 ++++++++++---- .../integration/cqlengine/model/test_udts.py | 112 +++++++++++------- 4 files changed, 197 insertions(+), 71 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 94d0d156e4..0d790bfc60 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -14,7 +14,7 @@ from datetime import datetime, timedelta, date, tzinfo from decimal import Decimal as D -from unittest import TestCase +from unittest import TestCase, SkipTest from uuid import uuid4, uuid1 from cassandra import InvalidRequest @@ -34,6 +34,7 @@ from cassandra.cqlengine.models import Model, ValidationError from cassandra import util +from tests.integration import 
PROTOCOL_VERSION from tests.integration.cqlengine.base import BaseCassEngTestCase @@ -153,6 +154,9 @@ class DateTest(Model): @classmethod def setUpClass(cls): + if PROTOCOL_VERSION < 4: + raise SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) + super(TestDate, cls).setUpClass() sync_table(cls.DateTest) diff --git a/tests/integration/cqlengine/columns/test_value_io.py b/tests/integration/cqlengine/columns/test_value_io.py index 9b5947dff3..243f7096ad 100644 --- a/tests/integration/cqlengine/columns/test_value_io.py +++ b/tests/integration/cqlengine/columns/test_value_io.py @@ -12,17 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. -from datetime import datetime, timedelta +from datetime import datetime, timedelta, time from decimal import Decimal from uuid import uuid1, uuid4, UUID import six +import unittest from cassandra.cqlengine import columns from cassandra.cqlengine.management import sync_table from cassandra.cqlengine.management import drop_table from cassandra.cqlengine.models import Model -from cassandra import util +from cassandra.util import Date, Time + +from tests.integration import PROTOCOL_VERSION from tests.integration.cqlengine.base import BaseCassEngTestCase @@ -55,10 +58,10 @@ def setUpClass(cls): # create a table with the given column class IOTestModel(Model): - table_name = cls.column.db_type + "_io_test_model_{}".format(uuid4().hex[:8]) pkey = cls.column(primary_key=True) data = cls.column() + cls._generated_model = IOTestModel sync_table(cls._generated_model) @@ -149,11 +152,18 @@ class TestDateTime(BaseColumnIOTest): class TestDate(BaseColumnIOTest): + @classmethod + def setUpClass(cls): + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) + + super(TestDate, cls).setUpClass() + column = columns.Date - now = 
util.Date(datetime.now().date()) + now = Date(datetime.now().date()) pkey_val = now - data_val = util.Date(now.days_from_epoch + 1) + data_val = Date(now.days_from_epoch + 1) class TestUUID(BaseColumnIOTest): @@ -210,3 +220,46 @@ class TestDecimalIO(BaseColumnIOTest): def comparator_converter(self, val): return Decimal(val) +class TestTime(BaseColumnIOTest): + + @classmethod + def setUpClass(cls): + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) + + super(TestTime, cls).setUpClass() + + column = columns.Time + + pkey_val = Time(time(2, 12, 7, 48)) + data_val = Time(time(16, 47, 25, 7)) + + +class TestSmallInt(BaseColumnIOTest): + + @classmethod + def setUpClass(cls): + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) + + super(TestSmallInt, cls).setUpClass() + + column = columns.SmallInt + + pkey_val = 16768 + data_val = 32523 + + +class TestTinyInt(BaseColumnIOTest): + + @classmethod + def setUpClass(cls): + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) + + super(TestTinyInt, cls).setUpClass() + + column = columns.TinyInt + + pkey_val = 1 + data_val = 123 \ No newline at end of file diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 1c274353ac..436e8955fb 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -14,6 +14,7 @@ from uuid import uuid4, UUID import random +import unittest from datetime import datetime, date, time from decimal import Decimal from operator import itemgetter @@ -25,6 +26,7 @@ from cassandra.cqlengine.models import Model from cassandra.util import Date, Time +from tests.integration import 
PROTOCOL_VERSION from tests.integration.cqlengine.base import BaseCassEngTestCase class TestModel(Model): @@ -148,40 +150,72 @@ class AllDatatypesModel(Model): b = columns.BigInt() c = columns.Blob() d = columns.Boolean() - e = columns.Date() - f = columns.DateTime() - g = columns.Decimal() - h = columns.Double() - i = columns.Float(double_precision=False) - j = columns.Inet() - k = columns.Integer() - l = columns.SmallInt() - m = columns.Text() - n = columns.Time() - o = columns.TimeUUID() - p = columns.TinyInt() - q = columns.UUID() - r = columns.VarInt() + e = columns.DateTime() + f = columns.Decimal() + g = columns.Double() + h = columns.Float(double_precision=False) + i = columns.Inet() + j = columns.Integer() + k = columns.Text() + l = columns.TimeUUID() + m = columns.UUID() + n = columns.VarInt() sync_table(AllDatatypesModel) - input = ['ascii', 2 ** 63 - 1, bytearray(b'hello world'), True, Date(date(1970, 1, 1)), - datetime.utcfromtimestamp(872835240), Decimal('12.3E+7'), 2.39, - 3.4028234663852886e+38, '123.123.123.123', 2147483647, 32523, 'text', Time(time(16, 47, 25, 7)), - UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), 123, UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), + input = ['ascii', 2 ** 63 - 1, bytearray(b'hello world'), True, datetime.utcfromtimestamp(872835240), + Decimal('12.3E+7'), 2.39, 3.4028234663852886e+38, '123.123.123.123', 2147483647, 'text', + UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), int(str(2147483647) + '000')] - AllDatatypesModel.create(id=0, a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True, e=date(1970, 1, 1), - f=datetime.utcfromtimestamp(872835240), g=Decimal('12.3E+7'), h=2.39, - i=3.4028234663852886e+38, j='123.123.123.123', k=2147483647, l=32523, m='text', - n=time(16, 47, 25, 7), o=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), - p=123, q=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), - r=int(str(2147483647) + '000')) + AllDatatypesModel.create(id=0, a='ascii', b=2 
** 63 - 1, c=bytearray(b'hello world'), d=True, + e=datetime.utcfromtimestamp(872835240), f=Decimal('12.3E+7'), g=2.39, + h=3.4028234663852886e+38, i='123.123.123.123', j=2147483647, k='text', + l=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), + m=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), n=int(str(2147483647) + '000')) self.assertEqual(1, AllDatatypesModel.objects.count()) output = AllDatatypesModel.objects().first() - for i, i_char in enumerate(range(ord('a'), ord('a') + 15)): + for i, i_char in enumerate(range(ord('a'), ord('a') + 14)): + self.assertEqual(input[i], output[chr(i_char)]) + + def test_can_insert_model_with_all_protocol_v4_column_types(self): + """ + Test for inserting all protocol v4 column types into a Model + + test_can_insert_model_with_all_protocol_v4_column_types tests that each cqlengine protocol v4 column type can be + inserted into a Model. It first creates a Model that has each cqlengine protocol v4 column type. It then creates + a Model instance where all the fields have corresponding data, which performs the insert into the Cassandra table. + Finally, it verifies that each column read from the Model from Cassandra is the same as the input parameters. + + @since 2.6.0 + @jira_ticket PYTHON-245 + @expected_result The Model is inserted with each protocol v4 column type, and the resulting read yields proper data for each column. 
+ + @test_category data_types:primitive + """ + + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) + + class AllDatatypesModel(Model): + id = columns.Integer(primary_key=True) + a = columns.Date() + b = columns.SmallInt() + c = columns.Time() + d = columns.TinyInt() + + sync_table(AllDatatypesModel) + + input = [Date(date(1970, 1, 1)), 32523, Time(time(16, 47, 25, 7)), 123] + + AllDatatypesModel.create(id=0, a=date(1970, 1, 1), b=32523, c=time(16, 47, 25, 7), d=123) + + self.assertEqual(1, AllDatatypesModel.objects.count()) + output = AllDatatypesModel.objects().first() + + for i, i_char in enumerate(range(ord('a'), ord('a') + 3)): self.assertEqual(input[i], output[chr(i_char)]) def test_can_insert_double_and_float(self): @@ -399,6 +433,9 @@ class TestQuerying(BaseCassEngTestCase): @classmethod def setUpClass(cls): + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest("Date query tests require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) + super(TestQuerying, cls).setUpClass() drop_table(TestQueryModel) sync_table(TestQueryModel) diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 681f7ba8f1..49e934c53e 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -23,7 +23,7 @@ from cassandra.cqlengine.management import sync_table, sync_type, create_keyspace_simple, drop_keyspace from cassandra.util import Date, Time -from tests.integration import get_server_versions +from tests.integration import get_server_versions, PROTOCOL_VERSION from tests.integration.cqlengine.base import BaseCassEngTestCase @@ -31,8 +31,8 @@ class UserDefinedTypeTests(BaseCassEngTestCase): @classmethod def setUpClass(self): - if get_server_versions()[0] < (2, 1, 0): - raise unittest.SkipTest("UDTs require Cassandra 2.1 or greater") + if PROTOCOL_VERSION 
< 3: + raise unittest.SkipTest("UDTs require native protocol 3+, currently using: {0}".format(PROTOCOL_VERSION)) def test_can_create_udts(self): class User(UserType): @@ -209,20 +209,16 @@ class AllDatatypes(UserType): b = columns.BigInt() c = columns.Blob() d = columns.Boolean() - e = columns.Date() - f = columns.DateTime() - g = columns.Decimal() - h = columns.Double() - i = columns.Float(double_precision=False) - j = columns.Inet() - k = columns.Integer() - l = columns.SmallInt() - m = columns.Text() - n = columns.Time() - o = columns.TimeUUID() - p = columns.TinyInt() - q = columns.UUID() - r = columns.VarInt() + e = columns.DateTime() + f = columns.Decimal() + g = columns.Double() + h = columns.Float(double_precision=False) + i = columns.Inet() + j = columns.Integer() + k = columns.Text() + l = columns.TimeUUID() + m = columns.UUID() + n = columns.VarInt() class AllDatatypesModel(Model): id = columns.Integer(primary_key=True) @@ -231,7 +227,7 @@ class AllDatatypesModel(Model): sync_table(AllDatatypesModel) input = AllDatatypes(a=None, b=None, c=None, d=None, e=None, f=None, g=None, h=None, i=None, j=None, k=None, - l=None, m=None, n=None, o=None, p=None, q=None, r=None) + l=None, m=None, n=None) AllDatatypesModel.create(id=0, data=input) self.assertEqual(1, AllDatatypesModel.objects.count()) @@ -260,20 +256,16 @@ class AllDatatypes(UserType): b = columns.BigInt() c = columns.Blob() d = columns.Boolean() - e = columns.Date() - f = columns.DateTime() - g = columns.Decimal() - h = columns.Double() - i = columns.Float(double_precision=False) - j = columns.Inet() - k = columns.Integer() - l = columns.SmallInt() - m = columns.Text() - n = columns.Time() - o = columns.TimeUUID() - p = columns.TinyInt() - q = columns.UUID() - r = columns.VarInt() + e = columns.DateTime() + f = columns.Decimal() + g = columns.Double() + h = columns.Float(double_precision=False) + i = columns.Inet() + j = columns.Integer() + k = columns.Text() + l = columns.TimeUUID() + m = 
columns.UUID() + n = columns.VarInt() class AllDatatypesModel(Model): id = columns.Integer(primary_key=True) @@ -281,18 +273,58 @@ class AllDatatypesModel(Model): sync_table(AllDatatypesModel) - input = AllDatatypes(a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True, e=Date(date(1970, 1, 1)), - f=datetime.utcfromtimestamp(872835240), g=Decimal('12.3E+7'), h=2.39, - i=3.4028234663852886e+38, j='123.123.123.123', k=2147483647, l=32523, m='text', - n=Time(time(16, 47, 25, 7)), o=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), - p=123, q=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), - r=int(str(2147483647) + '000')) + input = AllDatatypes(a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True, + e=datetime.utcfromtimestamp(872835240), f=Decimal('12.3E+7'), g=2.39, + h=3.4028234663852886e+38, i='123.123.123.123', j=2147483647, k='text', + l=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), + m=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), n=int(str(2147483647) + '000')) AllDatatypesModel.create(id=0, data=input) self.assertEqual(1, AllDatatypesModel.objects.count()) output = AllDatatypesModel.objects().first().data - for i in range(ord('a'), ord('a') + 15): + for i in range(ord('a'), ord('a') + 14): + self.assertEqual(input[chr(i)], output[chr(i)]) + + def test_can_insert_udts_protocol_v4_datatypes(self): + """ + Test for inserting all protocol v4 column types into a UserType + + test_can_insert_udts_protocol_v4_datatypes tests that each protocol v4 cqlengine column type can be inserted + into a UserType. It first creates a UserType that has each protocol v4 cqlengine column type, and a corresponding + table/Model. It then creates a UserType instance where all the fields have corresponding data, and inserts the + UserType as an instance of the Model. Finally, it verifies that each column read from the UserType from Cassandra + is the same as the input parameters. 
+ + @since 2.6.0 + @jira_ticket PYTHON-245 + @expected_result The UserType is inserted with each protocol v4 column type, and the resulting read yields proper data for each column. + + @test_category data_types:udt + """ + + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest("Protocol v4 datatypes in UDTs require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) + + class AllDatatypes(UserType): + a = columns.Date() + b = columns.SmallInt() + c = columns.Time() + d = columns.TinyInt() + + class AllDatatypesModel(Model): + id = columns.Integer(primary_key=True) + data = columns.UserDefinedType(AllDatatypes) + + sync_table(AllDatatypesModel) + + input = AllDatatypes(a=Date(date(1970, 1, 1)), b=32523, c=Time(time(16, 47, 25, 7)), d=123) + AllDatatypesModel.create(id=0, data=input) + + self.assertEqual(1, AllDatatypesModel.objects.count()) + output = AllDatatypesModel.objects().first().data + + for i in range(ord('a'), ord('a') + 3): self.assertEqual(input[chr(i)], output[chr(i)]) def test_nested_udts_inserts(self): @@ -319,7 +351,7 @@ class Name(UserType): class Container(Model): id = columns.UUID(primary_key=True, default=uuid4) - names = columns.List(columns.UserDefinedType(Name())) + names = columns.List(columns.UserDefinedType(Name)) # Construct the objects and insert them names = [] From f00e5ea350a41d5aa72d21a7c25d218259244bd0 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 22 May 2015 15:26:20 -0500 Subject: [PATCH 1466/3726] Run with Mirroring query handler if C* >= 2.2 moved custom payload tests to .../standard --- tests/integration/__init__.py | 7 ++- .../{long => standard}/test_custom_payload.py | 43 +++---------------- 2 files changed, 11 insertions(+), 39 deletions(-) rename tests/integration/{long => standard}/test_custom_payload.py (81%) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 887498e398..1f43d6d652 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -203,9 
+203,14 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True): common.switch_cluster(path, cluster_name) cluster.populate(nodes, ipformat=ipformat) + jvm_args = [] + # This will enable the Mirroring query handler which will echo our custom payload k,v pairs back + if PROTOCOL_VERSION >= 4: + jvm_args = [" -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler"] + if start: log.debug("Starting ccm %s cluster", cluster_name) - cluster.start(wait_for_binary_proto=True, wait_other_notice=True) + cluster.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=jvm_args) setup_keyspace(ipformat=ipformat) CCM_CLUSTER = cluster diff --git a/tests/integration/long/test_custom_payload.py b/tests/integration/standard/test_custom_payload.py similarity index 81% rename from tests/integration/long/test_custom_payload.py rename to tests/integration/standard/test_custom_payload.py index f131a5e756..c253d64b4b 100644 --- a/tests/integration/long/test_custom_payload.py +++ b/tests/integration/standard/test_custom_payload.py @@ -21,54 +21,21 @@ from cassandra.query import (SimpleStatement, BatchStatement, BatchType) from cassandra.cluster import Cluster -from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace - +from tests.integration import use_singledc, PROTOCOL_VERSION def setup_module(): - """ - We need some custom setup for this module. All unit tests in this module - require protocol >=4. We won't bother going through the setup required unless that is the - protocol version we are using. 
- """ - - # If we aren't at protocol v 4 or greater don't waste time setting anything up, all tests will be skipped - if PROTOCOL_VERSION >= 4: - # Don't start the ccm cluster until we get the custom jvm argument specified - use_singledc(start=False) - ccm_cluster = get_cluster() - # if needed stop CCM cluster - ccm_cluster.stop() - # This will enable the Mirroring query handler which will echo our custom payload k,v pairs back to us - jmv_args = [ - " -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler"] - ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=jmv_args) - # wait for nodes to startup - setup_keyspace() - - -def teardown_module(): - """ - The rests of the tests don't need our custom payload query handle so stop the cluster so we - don't impact other tests - """ - - ccm_cluster = get_cluster() - if ccm_cluster is not None: - ccm_cluster.stop() - + use_singledc() class CustomPayloadTests(unittest.TestCase): - def setUp(self): - """ - Test is skipped if run with cql version <4 - """ - + @classmethod + def setUpClass(cls): if PROTOCOL_VERSION < 4: raise unittest.SkipTest( "Native protocol 4,0+ is required for custom payloads, currently using %r" % (PROTOCOL_VERSION,)) + def setUp(self): self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) self.session = self.cluster.connect() From 0eb6476aafa5f219c1718210374850227d5f923f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 26 May 2015 09:03:26 -0500 Subject: [PATCH 1467/3726] Integration test for client warning PYTHON-315 --- .../standard/test_client_warnings.py | 124 ++++++++++++++++++ 1 file changed, 124 insertions(+) create mode 100644 tests/integration/standard/test_client_warnings.py diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py new file mode 100644 index 0000000000..b329887239 --- /dev/null +++ b/tests/integration/standard/test_client_warnings.py @@ 
-0,0 +1,124 @@ +# Copyright 2013-2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +try: + import unittest2 as unittest +except ImportError: + import unittest + +from cassandra.query import BatchStatement +from cassandra.cluster import Cluster + +from tests.integration import use_singledc, PROTOCOL_VERSION + + +def setup_module(): + use_singledc() + + +class ClientWarningTests(unittest.TestCase): + + @classmethod + def setUpClass(cls): + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest( + "Native protocol 4,0+ is required for client warnings, currently using %r" + % (PROTOCOL_VERSION,)) + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.session = cls.cluster.connect() + + cls.session.execute("CREATE TABLE IF NOT EXISTS test1rf.client_warning (k int, v0 int, v1 int, PRIMARY KEY (k, v0))") + cls.prepared = cls.session.prepare("INSERT INTO test1rf.client_warning (k, v0, v1) VALUES (?, ?, ?)") + + cls.warn_batch = BatchStatement() + # 213 = 5 * 1024 / (4+4 + 4+4 + 4+4) + # thresh_kb/ (min param size) + for x in range(213): + cls.warn_batch.add(cls.prepared, (x, x, 1)) + + + @classmethod + def tearDownClass(cls): + cls.cluster.shutdown() + + def test_warning_basic(self): + """ + Test to validate that client warnings can be surfaced + + @since 2.6.0 + @jira_ticket PYTHON-315 + @expected_result valid warnings returned + @test_assumptions + - batch_size_warn_threshold_in_kb: 5 + @test_category queries:client_warning + """ + future = 
self.session.execute_async(self.warn_batch) + future.result() + self.assertEqual(len(future.warnings), 1) + self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*') + + def test_warning_with_trace(self): + """ + Test to validate client warning with tracing + + @since 2.6.0 + @jira_ticket PYTHON-315 + @expected_result valid warnings returned + @test_assumptions + - batch_size_warn_threshold_in_kb: 5 + @test_category queries:client_warning + """ + future = self.session.execute_async(self.warn_batch, trace=True) + future.result() + self.assertEqual(len(future.warnings), 1) + self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*') + self.assertIsNotNone(future._query_trace) + + def test_warning_with_custom_payload(self): + """ + Test to validate client warning with custom payload + + @since 2.6.0 + @jira_ticket PYTHON-315 + @expected_result valid warnings returned + @test_assumptions + - batch_size_warn_threshold_in_kb: 5 + @test_category queries:client_warning + """ + payload = {'key': 'value'} + future = self.session.execute_async(self.warn_batch, custom_payload=payload) + future.result() + self.assertEqual(len(future.warnings), 1) + self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*') + self.assertDictEqual(future.custom_payload, payload) + + def test_warning_with_trace_and_custom_payload(self): + """ + Test to validate client warning with tracing and client warning + + @since 2.6.0 + @jira_ticket PYTHON-315 + @expected_result valid warnings returned + @test_assumptions + - batch_size_warn_threshold_in_kb: 5 + @test_category queries:client_warning + """ + payload = {'key': 'value'} + future = self.session.execute_async(self.warn_batch, trace=True, custom_payload=payload) + future.result() + self.assertEqual(len(future.warnings), 1) + self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*') + self.assertIsNotNone(future._query_trace) + self.assertDictEqual(future.custom_payload, payload) From 
9b149b30925355da94bf1eb1cab17196c2dc4fcd Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 27 May 2015 13:35:28 -0500 Subject: [PATCH 1468/3726] Don't skipTest in unittest.TestCase.setUpClass resolves an issue where skipping from class setup errors in unittest2 This is reported fixed in unittest2 0.4.2, but seems to be still happening in 1.0.1 https://pypi.python.org/pypi/unittest2#id9 --- tests/integration/standard/test_client_warnings.py | 14 +++++++++++--- tests/integration/standard/test_custom_payload.py | 5 +---- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index b329887239..173cc69756 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -33,9 +33,8 @@ class ClientWarningTests(unittest.TestCase): @classmethod def setUpClass(cls): if PROTOCOL_VERSION < 4: - raise unittest.SkipTest( - "Native protocol 4,0+ is required for client warnings, currently using %r" - % (PROTOCOL_VERSION,)) + return + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) cls.session = cls.cluster.connect() @@ -51,8 +50,17 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): + if PROTOCOL_VERSION < 4: + return + cls.cluster.shutdown() + def setUp(self): + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest( + "Native protocol 4,0+ is required for client warnings, currently using %r" + % (PROTOCOL_VERSION,)) + def test_warning_basic(self): """ Test to validate that client warnings can be surfaced diff --git a/tests/integration/standard/test_custom_payload.py b/tests/integration/standard/test_custom_payload.py index c253d64b4b..87b38fac40 100644 --- a/tests/integration/standard/test_custom_payload.py +++ b/tests/integration/standard/test_custom_payload.py @@ -28,14 +28,11 @@ def setup_module(): class CustomPayloadTests(unittest.TestCase): - @classmethod - def setUpClass(cls): + def 
setUp(self): if PROTOCOL_VERSION < 4: raise unittest.SkipTest( "Native protocol 4,0+ is required for custom payloads, currently using %r" % (PROTOCOL_VERSION,)) - - def setUp(self): self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) self.session = self.cluster.connect() From f04a14d5728492c1cc7df72210bbe26b6a597755 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 27 May 2015 14:41:56 -0500 Subject: [PATCH 1469/3726] Adding documentation for metadata tests --- tests/integration/standard/test_metadata.py | 152 ++++++++++++++++++-- 1 file changed, 143 insertions(+), 9 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 69d599f534..2bb88b50cf 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1357,20 +1357,31 @@ def make_function_kwargs(self, called_on_null=True): 'called_on_null_input': called_on_null} def test_functions_after_udt(self): + """ + Test to to ensure udt's come after functions in in keyspace dump + + test_functions_after_udt creates a basic function. 
Then queries that function and make sure that in the results + that UDT's are listed before any corresponding functions, when we dump the keyspace + Ideally we would make a function that takes a udt type, but this presently fails because C* c059a56 requires + udt to be frozen to create, but does not store meta indicating frozen + SEE https://issues.apache.org/jira/browse/CASSANDRA-9186 + Maybe update this after release + kwargs = self.make_function_kwargs() + kwargs['type_signature'][0] = "frozen<%s>" % udt_name + expected_meta = Function(**kwargs) + with self.VerifiedFunction(self, **kwargs): + + @since 2.6.0 + @jira_ticket PYTHON-211 + @expected_result UDT's should come before any functions + @test_category function + """ + self.assertNotIn(self.function_name, self.keyspace_function_meta) udt_name = 'udtx' self.session.execute("CREATE TYPE %s (x int)" % udt_name) - # Ideally we would make a function that takes a udt type, but - # this presently fails because C* c059a56 requires udt to be frozen to create, but does not store meta indicating frozen - # https://issues.apache.org/jira/browse/CASSANDRA-9186 - # Maybe update this after release - #kwargs = self.make_function_kwargs() - #kwargs['type_signature'][0] = "frozen<%s>" % udt_name - - #expected_meta = Function(**kwargs) - #with self.VerifiedFunction(self, **kwargs): with self.VerifiedFunction(self, **self.make_function_kwargs()): # udts must come before functions in keyspace dump keyspace_cql = self.cluster.metadata.keyspaces[self.keyspace_name].export_as_string() @@ -1380,22 +1391,54 @@ def test_functions_after_udt(self): self.assertGreater(func_idx, type_idx) def test_function_same_name_diff_types(self): + """ + Test to verify to that functions with different signatures are differentiated in metadata + + test_function_same_name_diff_types Creates two functions. One with the same name but a slightly different + signature. Then ensures that both are surfaced separately in our metadata. 
+ + @since 2.6.0 + @jira_ticket PYTHON-211 + @expected_result function with the same name but different signatures should be surfaced separately + @test_category function + """ + + # Create a function kwargs = self.make_function_kwargs() with self.VerifiedFunction(self, **kwargs): + # another function: same name, different type sig. self.assertGreater(len(kwargs['type_signature']), 1) self.assertGreater(len(kwargs['argument_names']), 1) kwargs['type_signature'] = kwargs['type_signature'][:1] kwargs['argument_names'] = kwargs['argument_names'][:1] + + # Ensure they are surfaced separately with self.VerifiedFunction(self, **kwargs): functions = [f for f in self.keyspace_function_meta.values() if f.name == self.function_name] self.assertEqual(len(functions), 2) self.assertNotEqual(functions[0].type_signature, functions[1].type_signature) def test_functions_follow_keyspace_alter(self): + """ + Test to verify to that functions maintain equality after a keyspace is altered + + test_functions_follow_keyspace_alter creates a function then alters a the keyspace associated with that function. + After the alter we validate that the function maintains the same metadata + + + @since 2.6.0 + @jira_ticket PYTHON-211 + @expected_result functions are the same after parent keyspace is altered + @test_category function + """ + + # Create function with self.VerifiedFunction(self, **self.make_function_kwargs()): original_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name] self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name) + + # After keyspace alter ensure that we maintain function equality. 
try: new_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name] self.assertNotEqual(original_keyspace_meta, new_keyspace_meta) @@ -1404,6 +1447,20 @@ def test_functions_follow_keyspace_alter(self): self.session.execute('ALTER KEYSPACE %s WITH durable_writes = true' % self.keyspace_name) def test_function_cql_called_on_null(self): + """ + Test to verify to that that called on null argument is honored on function creation. + + test_functions_follow_keyspace_alter create two functions. One with the called_on_null_input set to true, + the other with it set to false. We then verify that the metadata constructed from those function is correctly + reflected + + + @since 2.6.0 + @jira_ticket PYTHON-211 + @expected_result functions metadata correctly reflects called_on_null_input flag. + @test_category function + """ + kwargs = self.make_function_kwargs() kwargs['called_on_null_input'] = True with self.VerifiedFunction(self, **kwargs) as vf: @@ -1460,10 +1517,36 @@ def make_aggregate_kwargs(self, state_func, state_type, final_func=None, init_co 'return_type': "does not matter for creation"} def test_return_type_meta(self): + """ + Test to verify to that the return type of a an aggregate is honored in the metadata + + test_return_type_meta creates an aggregate then ensures the return type of the created + aggregate is correctly surfaced in the metadata + + + @since 2.6.0 + @jira_ticket PYTHON-211 + @expected_result aggregate has the correct return typ in the metadata + @test_category aggregate + """ + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', Int32Type, init_cond=1)) as va: self.assertIs(self.keyspace_aggregate_meta[va.signature].return_type, Int32Type) def test_init_cond(self): + """ + Test to verify that various initial conditions are correctly surfaced in various aggregate functions + + test_init_cond creates several different types of aggregates, and given various initial conditions it verifies that + they correctly impact the 
aggregate's execution + + + @since 2.6.0 + @jira_ticket PYTHON-211 + @expected_result initial conditions are correctly evaluated as part of the aggregates + @test_category aggregate + """ + # This is required until the java driver bundled with C* is updated to support v4 c = Cluster(protocol_version=3) s = c.connect(self.keyspace_name) @@ -1496,6 +1579,19 @@ def test_init_cond(self): c.shutdown() def test_aggregates_after_functions(self): + """ + Test to verify that aggregates are listed after function in metadata + + test_aggregates_after_functions creates an aggregate, and then verifies that they are listed + after any function creations when the keypspace dump is preformed + + + @since 2.6.0 + @jira_ticket PYTHON-211 + @expected_result aggregates are declared after any functions + @test_category aggregate + """ + # functions must come before functions in keyspace dump with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('extend_list', ListType.apply_parameters([UTF8Type]))): keyspace_cql = self.cluster.metadata.keyspaces[self.keyspace_name].export_as_string() @@ -1505,6 +1601,18 @@ def test_aggregates_after_functions(self): self.assertGreater(aggregate_idx, func_idx) def test_same_name_diff_types(self): + """ + Test to verify to that aggregates with different signatures are differentiated in metadata + + test_same_name_diff_types Creates two Aggregates. One with the same name but a slightly different + signature. Then ensures that both are surfaced separately in our metadata. 
+ + @since 2.6.0 + @jira_ticket PYTHON-211 + @expected_result aggregates with the same name but different signatures should be surfaced separately + @test_category function + """ + kwargs = self.make_aggregate_kwargs('sum_int', Int32Type, init_cond=0) with self.VerifiedAggregate(self, **kwargs): kwargs['state_func'] = 'sum_int_two' @@ -1515,6 +1623,19 @@ def test_same_name_diff_types(self): self.assertNotEqual(aggregates[0].type_signature, aggregates[1].type_signature) def test_aggregates_follow_keyspace_alter(self): + """ + Test to verify to that aggregates maintain equality after a keyspace is altered + + test_aggregates_follow_keyspace_alter creates a function then alters a the keyspace associated with that + function. After the alter we validate that the function maintains the same metadata + + + @since 2.6.0 + @jira_ticket PYTHON-211 + @expected_result aggregates are the same after parent keyspace is altered + @test_category function + """ + with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', Int32Type, init_cond=0)): original_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name] self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name) @@ -1526,6 +1647,19 @@ def test_aggregates_follow_keyspace_alter(self): self.session.execute('ALTER KEYSPACE %s WITH durable_writes = true' % self.keyspace_name) def test_cql_optional_params(self): + """ + Test to verify that the initial_cond and final_func parameters are correctly honored + + test_cql_optional_params creates various aggregates with different combinations of initial_condition, + and final_func parameters set. It then ensures they are correctly honored. 
+ + + @since 2.6.0 + @jira_ticket PYTHON-211 + @expected_result initial_condition and final_func parameters are honored correctly + @test_category function + """ + kwargs = self.make_aggregate_kwargs('extend_list', ListType.apply_parameters([UTF8Type])) # no initial condition, final func From c843e6886f3b2b17645117a8d8f1c8187078a254 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 27 May 2015 15:48:46 -0500 Subject: [PATCH 1470/3726] Add windows inet_pton for python versions that do not have the socket routine Fixes an issue with inet addresses in the protocol (PYTHON-309) Adds IPv6 support for Windows (PYTHON-20) --- cassandra/cqltypes.py | 22 ++++-------- cassandra/protocol.py | 5 +-- cassandra/util.py | 83 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 92 insertions(+), 18 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index db0011344d..77fc2b911b 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -517,35 +517,25 @@ def serialize(byts, protocol_version): return varint_pack(byts) -have_ipv6_packing = hasattr(socket, 'inet_ntop') - - class InetAddressType(_CassandraType): typename = 'inet' - # TODO: implement basic ipv6 support for Windows? 
- # inet_ntop and inet_pton aren't available on Windows - @staticmethod def deserialize(byts, protocol_version): if len(byts) == 16: - if not have_ipv6_packing: - raise Exception( - "IPv6 addresses cannot currently be handled on Windows") - return socket.inet_ntop(socket.AF_INET6, byts) + return util.inet_ntop(socket.AF_INET6, byts) else: + # util.inet_pton could also handle, but this is faster + # since we've already determined the AF return socket.inet_ntoa(byts) @staticmethod def serialize(addr, protocol_version): if ':' in addr: - fam = socket.AF_INET6 - if not have_ipv6_packing: - raise Exception( - "IPv6 addresses cannot currently be handled on Windows") - return socket.inet_pton(fam, addr) + return util.inet_pton(socket.AF_INET6, addr) else: - fam = socket.AF_INET + # util.inet_pton could also handle, but this is faster + # since we've already determined the AF return socket.inet_aton(addr) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 13078b46fd..1c851f8c5d 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -39,6 +39,7 @@ TupleType, lookup_casstype, SimpleDateType, TimeType, ByteType, ShortType) from cassandra.policies import WriteType +from cassandra import util log = logging.getLogger(__name__) @@ -1133,7 +1134,7 @@ def read_inet(f): addrfam = socket.AF_INET6 else: raise InternalError("bad inet address: %r" % (addrbytes,)) - return (socket.inet_ntop(addrfam, addrbytes), port) + return (util.inet_ntop(addrfam, addrbytes), port) def write_inet(f, addrtuple): @@ -1142,7 +1143,7 @@ def write_inet(f, addrtuple): addrfam = socket.AF_INET6 else: addrfam = socket.AF_INET - addrbytes = socket.inet_pton(addrfam, addr) + addrbytes = util.inet_pton(addrfam, addr) write_byte(f, len(addrbytes)) f.write(addrbytes) write_int(f, port) diff --git a/cassandra/util.py b/cassandra/util.py index 16457df589..f188cdcb2f 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1011,3 +1011,86 @@ def __str__(self): except: # If we overflow 
datetime.[MIN|MAX] return str(self.days_from_epoch) + +import socket +if hasattr(socket, 'inet_pton'): + inet_pton = socket.inet_pton + inet_ntop = socket.inet_ntop +else: + """ + Windows doesn't have socket.inet_pton and socket.inet_ntop until Python 3.4 + This is an alternative impl using ctypes, based on this win_inet_pton project: + https://github.com/hickeroar/win_inet_pton + """ + import ctypes + + + class sockaddr(ctypes.Structure): + _fields_ = [("sa_family", ctypes.c_short), + ("__pad1", ctypes.c_ushort), + ("ipv4_addr", ctypes.c_byte * 4), + ("ipv6_addr", ctypes.c_byte * 16), + ("__pad2", ctypes.c_ulong)] + + + if hasattr(ctypes, 'windll'): + WSAStringToAddressA = ctypes.windll.ws2_32.WSAStringToAddressA + WSAAddressToStringA = ctypes.windll.ws2_32.WSAAddressToStringA + else: + def not_windows(*args): + raise Exception("IPv6 addresses cannot be handled on Windows. " + "Missing ctypes.windll") + WSAStringToAddressA = not_windows + WSAAddressToStringA = not_windows + + + def inet_pton(address_family, ip_string): + if address_family == socket.AF_INET: + return socket.inet_aton(ip_string) + + addr = sockaddr() + addr.sa_family = address_family + addr_size = ctypes.c_int(ctypes.sizeof(addr)) + + if WSAStringToAddressA( + ip_string, + address_family, + None, + ctypes.byref(addr), + ctypes.byref(addr_size) + ) != 0: + raise socket.error(ctypes.FormatError()) + + if address_family == socket.AF_INET6: + return ctypes.string_at(addr.ipv6_addr, 16) + + raise socket.error('unknown address family') + + + def inet_ntop(address_family, packed_ip): + if address_family == socket.AF_INET: + return socket.inet_ntoa(packed_ip) + + addr = sockaddr() + addr.sa_family = address_family + addr_size = ctypes.c_int(ctypes.sizeof(addr)) + ip_string = ctypes.create_string_buffer(128) + ip_string_size = ctypes.c_int(ctypes.sizeof(ip_string)) + + if address_family == socket.AF_INET6: + if len(packed_ip) != ctypes.sizeof(addr.ipv6_addr): + raise socket.error('packed IP wrong length for 
inet_ntoa') + ctypes.memmove(addr.ipv6_addr, packed_ip, 16) + else: + raise socket.error('unknown address family') + + if WSAAddressToStringA( + ctypes.byref(addr), + addr_size, + None, + ip_string, + ctypes.byref(ip_string_size) + ) != 0: + raise socket.error(ctypes.FormatError()) + + return ip_string[:ip_string_size.value - 1] From 2e4fbba38e235952630f5c9d1b7860e2aec53295 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Wed, 27 May 2015 16:03:35 -0700 Subject: [PATCH 1471/3726] Jenkins stabilization fixes --- .../columns/test_container_columns.py | 36 +++++++++++++++---- .../integration/cqlengine/model/test_udts.py | 16 ++++----- tests/integration/standard/test_types.py | 2 -- 3 files changed, 37 insertions(+), 17 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index a9c8b35965..85a4255813 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -13,9 +13,10 @@ # limitations under the License. from datetime import datetime, timedelta -import json +import json, six, sys, traceback, logging from uuid import uuid4 -import six + +from cassandra import WriteTimeout from cassandra.cqlengine.models import Model, ValidationError import cassandra.cqlengine.columns as columns @@ -23,10 +24,10 @@ from tests.integration.cqlengine import is_prepend_reversed from tests.integration.cqlengine.base import BaseCassEngTestCase - -class TestSetModel(Model): +log = logging.getLogger(__name__) +class TestSetModel(Model): partition = columns.UUID(primary_key=True, default=uuid4) int_set = columns.Set(columns.Integer, required=False) text_set = columns.Set(columns.Text, required=False) @@ -124,7 +125,14 @@ def test_element_count_validation(self): """ Tests that big collections are detected and raise an exception. 
""" - TestSetModel.create(text_set={str(uuid4()) for i in range(65535)}) + while True: + try: + TestSetModel.create(text_set={str(uuid4()) for i in range(65535)}) + break + except WriteTimeout: + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb with self.assertRaises(ValidationError): TestSetModel.create(text_set={str(uuid4()) for i in range(65536)}) @@ -235,7 +243,14 @@ def test_element_count_validation(self): """ Tests that big collections are detected and raise an exception. """ - TestListModel.create(text_list=[str(uuid4()) for i in range(65535)]) + while True: + try: + TestListModel.create(text_list=[str(uuid4()) for i in range(65535)]) + break + except WriteTimeout: + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb with self.assertRaises(ValidationError): TestListModel.create(text_list=[str(uuid4()) for i in range(65536)]) @@ -410,7 +425,14 @@ def test_element_count_validation(self): """ Tests that big collections are detected and raise an exception. 
""" - TestMapModel.create(text_map={str(uuid4()): i for i in range(65535)}) + while True: + try: + TestMapModel.create(text_map={str(uuid4()): i for i in range(65535)}) + break + except WriteTimeout: + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb with self.assertRaises(ValidationError): TestMapModel.create(text_map={str(uuid4()): i for i in range(65536)}) diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 49e934c53e..2d870d38ed 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -306,23 +306,23 @@ def test_can_insert_udts_protocol_v4_datatypes(self): if PROTOCOL_VERSION < 4: raise unittest.SkipTest("Protocol v4 datatypes in UDTs require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) - class AllDatatypes(UserType): + class Allv4Datatypes(UserType): a = columns.Date() b = columns.SmallInt() c = columns.Time() d = columns.TinyInt() - class AllDatatypesModel(Model): + class Allv4DatatypesModel(Model): id = columns.Integer(primary_key=True) - data = columns.UserDefinedType(AllDatatypes) + data = columns.UserDefinedType(Allv4Datatypes) - sync_table(AllDatatypesModel) + sync_table(Allv4DatatypesModel) - input = AllDatatypes(a=Date(date(1970, 1, 1)), b=32523, c=Time(time(16, 47, 25, 7)), d=123) - AllDatatypesModel.create(id=0, data=input) + input = Allv4Datatypes(a=Date(date(1970, 1, 1)), b=32523, c=Time(time(16, 47, 25, 7)), d=123) + Allv4DatatypesModel.create(id=0, data=input) - self.assertEqual(1, AllDatatypesModel.objects.count()) - output = AllDatatypesModel.objects().first().data + self.assertEqual(1, Allv4DatatypesModel.objects.count()) + output = Allv4DatatypesModel.objects().first().data for i in range(ord('a'), ord('a') + 3): self.assertEqual(input[chr(i)], output[chr(i)]) diff --git a/tests/integration/standard/test_types.py 
b/tests/integration/standard/test_types.py index ba2a9738a8..c6c8114b57 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -89,8 +89,6 @@ def test_can_insert_blob_type_as_string(self): for expected, actual in zip(params, results): self.assertEqual(expected, actual) - c.shutdown() - def test_can_insert_blob_type_as_bytearray(self): """ Tests that blob type in Cassandra maps to bytearray in Python From 2216c3105327ba84a3be69c6f7e83272ed3481c9 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Wed, 27 May 2015 18:38:00 -0700 Subject: [PATCH 1472/3726] change overlapping table names in cqle test v4 test --- tests/integration/cqlengine/model/test_model_io.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 436e8955fb..2033d9dc92 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -199,21 +199,21 @@ def test_can_insert_model_with_all_protocol_v4_column_types(self): if PROTOCOL_VERSION < 4: raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) - class AllDatatypesModel(Model): + class v4DatatypesModel(Model): id = columns.Integer(primary_key=True) a = columns.Date() b = columns.SmallInt() c = columns.Time() d = columns.TinyInt() - sync_table(AllDatatypesModel) + sync_table(v4DatatypesModel) input = [Date(date(1970, 1, 1)), 32523, Time(time(16, 47, 25, 7)), 123] - AllDatatypesModel.create(id=0, a=date(1970, 1, 1), b=32523, c=time(16, 47, 25, 7), d=123) + v4DatatypesModel.create(id=0, a=date(1970, 1, 1), b=32523, c=time(16, 47, 25, 7), d=123) - self.assertEqual(1, AllDatatypesModel.objects.count()) - output = AllDatatypesModel.objects().first() + self.assertEqual(1, v4DatatypesModel.objects.count()) + output = 
v4DatatypesModel.objects().first() for i, i_char in enumerate(range(ord('a'), ord('a') + 3)): self.assertEqual(input[i], output[chr(i_char)]) From 33c71799c8c7b29734be15297578f7f75d9ff9be Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 28 May 2015 11:06:37 -0500 Subject: [PATCH 1473/3726] Update CQL keywords, expose in API docs. PYTHON-319 PYTHON-324 --- cassandra/metadata.py | 49 +++++++++++++++++++++++++-------- docs/api/cassandra/metadata.rst | 9 ++++++ tests/unit/test_metadata.py | 4 +-- 3 files changed, 48 insertions(+), 14 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 8ede52dc1e..843d96f38e 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -37,19 +37,44 @@ log = logging.getLogger(__name__) -_keywords = set(( - 'select', 'from', 'where', 'and', 'key', 'insert', 'update', 'with', - 'limit', 'using', 'use', 'count', 'set', - 'begin', 'apply', 'batch', 'truncate', 'delete', 'in', 'create', - 'keyspace', 'schema', 'columnfamily', 'table', 'index', 'on', 'drop', - 'primary', 'into', 'values', 'timestamp', 'ttl', 'alter', 'add', 'type', - 'compact', 'storage', 'order', 'by', 'asc', 'desc', 'clustering', - 'token', 'writetime', 'map', 'list', 'to' +cql_keywords = set(( + 'add', 'aggregate', 'all', 'allow', 'alter', 'and', 'apply', 'as', 'asc', 'ascii', 'authorize', 'batch', 'begin', + 'bigint', 'blob', 'boolean', 'by', 'called', 'clustering', 'columnfamily', 'compact', 'contains', 'count', + 'counter', 'create', 'custom', 'date', 'decimal', 'delete', 'desc', 'describe', 'distinct', 'double', 'drop', + 'entries', 'execute', 'exists', 'filtering', 'finalfunc', 'float', 'from', 'frozen', 'full', 'function', + 'functions', 'grant', 'if', 'in', 'index', 'inet', 'infinity', 'initcond', 'input', 'insert', 'int', 'into', 'json', + 'key', 'keys', 'keyspace', 'keyspaces', 'language', 'limit', 'list', 'login', 'map', 'modify', 'nan', 'nologin', + 'norecursive', 'nosuperuser', 'not', 'null', 'of', 'on', 'options', 'or', 
'order', 'password', 'permission', + 'permissions', 'primary', 'rename', 'replace', 'returns', 'revoke', 'role', 'roles', 'schema', 'select', 'set', + 'sfunc', 'smallint', 'static', 'storage', 'stype', 'superuser', 'table', 'text', 'time', 'timestamp', 'timeuuid', + 'tinyint', 'to', 'token', 'trigger', 'truncate', 'ttl', 'tuple', 'type', 'unlogged', 'update', 'use', 'user', + 'users', 'using', 'uuid', 'values', 'varchar', 'varint', 'where', 'with', 'writetime' )) - -_unreserved_keywords = set(( - 'key', 'clustering', 'ttl', 'compact', 'storage', 'type', 'values' +""" +Set of keywords in CQL. + +Derived from .../cassandra/src/java/org/apache/cassandra/cql3/Cql.g +""" + +cql_keywords_unreserved = set(( + 'aggregate', 'all', 'as', 'ascii', 'bigint', 'blob', 'boolean', 'called', 'clustering', 'compact', 'contains', + 'count', 'counter', 'custom', 'date', 'decimal', 'distinct', 'double', 'exists', 'filtering', 'finalfunc', 'float', + 'frozen', 'function', 'functions', 'inet', 'initcond', 'input', 'int', 'json', 'key', 'keys', 'keyspaces', + 'language', 'list', 'login', 'map', 'nologin', 'nosuperuser', 'options', 'password', 'permission', 'permissions', + 'returns', 'role', 'roles', 'sfunc', 'smallint', 'static', 'storage', 'stype', 'superuser', 'text', 'time', + 'timestamp', 'timeuuid', 'tinyint', 'trigger', 'ttl', 'tuple', 'type', 'user', 'users', 'uuid', 'values', 'varchar', + 'varint', 'writetime' )) +""" +Set of unreserved keywords in CQL. + +Derived from .../cassandra/src/java/org/apache/cassandra/cql3/Cql.g +""" + +cql_keywords_reserved = cql_keywords - cql_keywords_unreserved +""" +Set of reserved keywords in CQL. 
+""" class Metadata(object): @@ -1383,7 +1408,7 @@ def protect_value(value): def is_valid_name(name): if name is None: return False - if name.lower() in _keywords - _unreserved_keywords: + if name.lower() in cql_keywords_reserved: return False return valid_cql3_word_re.match(name) is not None diff --git a/docs/api/cassandra/metadata.rst b/docs/api/cassandra/metadata.rst index a89fd03ddb..291f63d5be 100644 --- a/docs/api/cassandra/metadata.rst +++ b/docs/api/cassandra/metadata.rst @@ -3,6 +3,15 @@ .. module:: cassandra.metadata +.. autodata:: cql_keywords + :annotation: + +.. autodata:: cql_keywords_unreserved + :annotation: + +.. autodata:: cql_keywords_reserved + :annotation: + .. autoclass:: Metadata () :members: :exclude-members: rebuild_schema, rebuild_token_map, add_host, remove_host, get_host diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 5ce78f2e25..8bdd428c65 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -210,8 +210,8 @@ def test_is_valid_name(self): self.assertEqual(is_valid_name('test1'), True) self.assertEqual(is_valid_name('1test1'), False) - non_valid_keywords = cassandra.metadata._keywords - cassandra.metadata._unreserved_keywords - for keyword in non_valid_keywords: + invalid_keywords = cassandra.metadata.cql_keywords - cassandra.metadata.cql_keywords_unreserved + for keyword in invalid_keywords: self.assertEqual(is_valid_name(keyword), False) From 6b6d31cf5622f94eddae3eb31a0a0373b22e934d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 28 May 2015 12:53:08 -0500 Subject: [PATCH 1474/3726] cqle: replace deprecated refresh_schema with new fn --- cassandra/cqlengine/management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 5323c8f93d..5d5e641459 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -302,7 +302,7 @@ def _sync_type(ks_name, type_model, 
omit_subtypes=None): log.debug("sync_type creating new type %s", type_name_qualified) cql = get_create_type(type_model, ks_name) execute(cql) - cluster.refresh_schema(keyspace=ks_name, usertype=type_name) + cluster.refresh_user_type_metadata(ks_name, type_name) type_model.register_for_keyspace(ks_name) else: defined_fields = defined_types[type_name].field_names From 58676ac9a25c943ac1420a3fddd8dd7b46767e1f Mon Sep 17 00:00:00 2001 From: Kracekumar Ramaraju Date: Fri, 29 May 2015 12:11:40 +0530 Subject: [PATCH 1475/3726] Use inbuilt UUID for validation --- cassandra/cqlengine/columns.py | 12 ++++++------ .../integration/cqlengine/columns/test_validation.py | 7 +++++++ 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 9edf0d6588..57a4ffd392 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -15,7 +15,6 @@ from copy import deepcopy, copy from datetime import date, datetime import logging -import re import six import warnings @@ -518,8 +517,6 @@ class UUID(Column): """ db_type = 'uuid' - re_uuid = re.compile(r'[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}') - def validate(self, value): val = super(UUID, self).validate(value) if val is None: @@ -527,9 +524,12 @@ def validate(self, value): from uuid import UUID as _UUID if isinstance(val, _UUID): return val - if isinstance(val, six.string_types) and self.re_uuid.match(val): - return _UUID(val) - raise ValidationError("{} {} is not a valid uuid".format(self.column_name, value)) + if isinstance(val, six.string_types): + try: + return _UUID(val) + except ValueError: + raise ValidationError("{} {} is not a valid uuid".format( + self.column_name, value)) def to_python(self, value): return self.validate(value) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 0d790bfc60..a74f83a0de 100644 --- 
a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -240,6 +240,13 @@ def test_uuid_str_no_dashes(self): t1 = self.UUIDTest.get(test_id=1) assert a_uuid == t1.a_uuid + def test_uuid_with_upcase(self): + a_uuid = uuid4() + val = str(a_uuid).upper() + t0 = self.UUIDTest.create(test_id=0, a_uuid=val) + t1 = self.UUIDTest.get(test_id=0) + assert a_uuid == t1.a_uuid + class TestTimeUUID(BaseCassEngTestCase): class TimeUUIDTest(Model): From 8e565d06ab94de13ac43e00a81d9741d9250f2e8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 29 May 2015 11:03:39 -0500 Subject: [PATCH 1476/3726] ValidationError for invalid UUID types Follow-on to #335. Fixes issue where non-string or UUID types would return None from UUID.validate. Also removed test that did not do what it purported (previously inserting None for key instead of value, and failing for Validation of UUID and not key). --- cassandra/cqlengine/columns.py | 8 ++++++-- .../cqlengine/columns/test_container_columns.py | 8 +------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 57a4ffd392..d418b11947 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -528,8 +528,10 @@ def validate(self, value): try: return _UUID(val) except ValueError: - raise ValidationError("{} {} is not a valid uuid".format( - self.column_name, value)) + # fall-through to error + pass + raise ValidationError("{} {} is not a valid uuid".format( + self.column_name, value)) def to_python(self, value): return self.validate(value) @@ -824,6 +826,8 @@ def validate(self, value): return if not isinstance(val, dict): raise ValidationError('{} {} is not a dict object'.format(self.column_name, val)) + if None in val: + raise ValidationError("{} None is not allowed in a map".format(self.column_name)) return {self.key_col.validate(k): self.value_col.validate(v) for k, v in 
val.items()} def to_python(self, value): diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index 85a4255813..6ec301eb67 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -345,8 +345,6 @@ def test_blind_list_updates_from_none(self): assert m3.int_list == [] class TestMapModel(Model): - - partition = columns.UUID(primary_key=True, default=uuid4) int_map = columns.Map(columns.Integer, columns.UUID, required=False) text_map = columns.Map(columns.Text, columns.DateTime, required=False) @@ -371,11 +369,7 @@ def test_empty_default(self): def test_add_none_as_map_key(self): with self.assertRaises(ValidationError): - TestMapModel.create(int_map={None:1}) - - def test_add_none_as_map_value(self): - with self.assertRaises(ValidationError): - TestMapModel.create(int_map={None:1}) + TestMapModel.create(int_map={None: uuid4()}) def test_empty_retrieve(self): tmp = TestMapModel.create() From 101a4ab76e4b39d0759471f2e2d3f52dc4edd744 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 29 May 2015 12:59:02 -0500 Subject: [PATCH 1477/3726] Exclude the libevwrapper extention for Windows --- setup.py | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index df3aef2797..e85d332d37 100644 --- a/setup.py +++ b/setup.py @@ -13,6 +13,7 @@ # limitations under the License. from __future__ import print_function +import os import sys import warnings @@ -37,9 +38,6 @@ from distutils.cmd import Command -import os -import warnings - try: import subprocess has_subprocess = True @@ -247,6 +245,26 @@ def run_setup(extensions): sys.argv = [a for a in sys.argv if a != "--no-libev"] extensions.remove(libev_ext) +is_windows = os.name == 'nt' +if is_windows: + # libev is difficult to build, and uses select in Windows. 
+ try: + extensions.remove(libev_ext) + except ValueError: + pass + build_extensions.error_message = """ +=============================================================================== +WARNING: could not compile %s. + +The C extensions are not required for the driver to run, but they add support +for token-aware routing with the Murmur3Partitioner. + +On Windows, make sure Visual Studio or an SDK is installed, and your environment +is configured to build for the appropriate architecture (matching your Python runtime). +This is often a matter of using vcvarsall.bat from your install directory, or running +from a command prompt in the Visual Studio Tools Start Menu. +=============================================================================== +""" platform_unsupported_msg = \ """ From f247df78764b10440b61bc8e9fbe2ef1ad6f42c6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 29 May 2015 13:30:41 -0500 Subject: [PATCH 1478/3726] cqle: add Float/Double overload deprecation to upgrade guide --- docs/api/cassandra/cqlengine/columns.rst | 2 +- docs/cqlengine/upgrade_guide.rst | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/api/cassandra/cqlengine/columns.rst b/docs/api/cassandra/cqlengine/columns.rst index aad164b606..7e3957e0cb 100644 --- a/docs/api/cassandra/cqlengine/columns.rst +++ b/docs/api/cassandra/cqlengine/columns.rst @@ -58,7 +58,7 @@ Columns of all types are initialized by passing :class:`.Column` attributes to t .. autoclass:: Decimal(**kwargs) -.. autoclass:: Double +.. autoclass:: Double(**kwargs) .. autoclass:: Float diff --git a/docs/cqlengine/upgrade_guide.rst b/docs/cqlengine/upgrade_guide.rst index 3488902790..831f94d7cf 100644 --- a/docs/cqlengine/upgrade_guide.rst +++ b/docs/cqlengine/upgrade_guide.rst @@ -79,6 +79,12 @@ This upgrade served as a good juncture to deprecate certain API features and inv to new ones. The first released version does not change functionality -- only introduces deprecation warnings. 
Future releases will remove these features in favor of the alternatives. +Float/Double Overload +--------------------- +Previously there was no ``Double`` column type. Doubles were modeled by specifying ``Float(double_precision=True)``. +This inititializer parameter is now deprecated. Applications should use :class:`~.columns.Double` for CQL ``double``, and :class:`~.columns.Float` +for CQL ``float``. + Schema Management ----------------- ``cassandra.cqlengine.management.create_keyspace`` is deprecated. Instead, use the new replication-strategy-specific From b945fa6fe508a020589f04a2b5a32c8257d852de Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 29 May 2015 13:45:30 -0500 Subject: [PATCH 1479/3726] Add link to platform/runtime survey --- README.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.rst b/README.rst index e976f93a66..bdc6dca7eb 100644 --- a/README.rst +++ b/README.rst @@ -12,6 +12,8 @@ The driver supports Python 2.6, 2.7, 3.3, and 3.4*. * cqlengine component presently supports Python 2.7+ +**Help us focus our efforts!** Provide your input on the `Platform and Runtime Survey `_. + Installation ------------ Installation through pip is recommended:: From f00bfe746d3b8c8d97c7332e5ebeb5973391a231 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 29 May 2015 14:23:43 -0500 Subject: [PATCH 1480/3726] Changelog update --- CHANGELOG.rst | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 45b087bc5f..bfa6776afe 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,41 @@ +2.6.0 +===== + +This release adds support for Cassandra 2.2 features, including version +4 of the native protocol. 
+ +Features +-------- +* Default load balancing policy to TokenAware(DCAware) (PYTHON-160) +* Configuration option for connection timeout (PYTHON-206) +* Support User Defined Function and Aggregate metadata in C* 2.2 (PYTHON-211) +* Surface request client in QueryTrace for C* 2.2+ (PYTHON-235) +* Implement new request failure messages in protocol v4+ (PYTHON-238) +* Metadata model now maps index meta by index name (PYTHON-241) +* Support new types in C* 2.2: date, time, smallint, tinyint (PYTHON-245, 295) +* cqle: add Double column type and remove Float overload (PYTHON-246) +* Use partition key column information in prepared response for protocol v4+ (PYTHON-277) +* Support message custom payloads in protocol v4+ (PYTHON-280) +* Deprecate refresh_schema and replace with functions for specific entities (PYTHON-291) +* Save trace id even when trace complete times out (PYTHON-302) +* Warn when registering client UDT class for protocol < v3 (PYTHON-305) +* Support client warnings returned with messages in protocol v4+ (PYTHON-315) +* Ability to distinguish between NULL and UNSET values in protocol v4+ (PYTHON-317) +* Expose CQL keywords in API (PYTHON-324) + +Bug Fixes +--------- +* IPv6 address support on Windows (PYTHON-20) +* Convert exceptions during automatic re-preparation to nice exceptions (PYTHON-207) +* cqle: Quote keywords properly in table management functions (PYTHON-244) +* Don't default to GeventConnection when gevent is loaded, but not monkey-patched (PYTHON-289) +* Pass dynamic host from SaslAuthProvider to SaslAuthenticator (PYTHON-300) +* Make protocol read_inet work for Windows (PYTHON-309) +* cqle: Correct encoding for nested types (PYTHON-311) +* Update list of CQL keywords used quoting identifiers (PYTHON-319) +* Make ConstantReconnectionPolicy work with infinite retries (github #327, PYTHON-325) +* Accept UUIDs with uppercase hex as valid in cqlengine (github #335) + 2.5.1 ===== April 23, 2015 From 38705c229ab4c1b4a047f8b140408b1ef456c49f Mon 
Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 29 May 2015 15:09:15 -0500 Subject: [PATCH 1481/3726] cqle: Note about Date type in upgrade guide --- docs/api/cassandra/cqlengine/columns.rst | 2 +- docs/cqlengine/upgrade_guide.rst | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/docs/api/cassandra/cqlengine/columns.rst b/docs/api/cassandra/cqlengine/columns.rst index 7e3957e0cb..ae597a55f7 100644 --- a/docs/api/cassandra/cqlengine/columns.rst +++ b/docs/api/cassandra/cqlengine/columns.rst @@ -52,7 +52,7 @@ Columns of all types are initialized by passing :class:`.Column` attributes to t .. autoclass:: Counter -.. autoclass:: Date +.. autoclass:: Date(**kwargs) .. autoclass:: DateTime(**kwargs) diff --git a/docs/cqlengine/upgrade_guide.rst b/docs/cqlengine/upgrade_guide.rst index 831f94d7cf..9af5876426 100644 --- a/docs/cqlengine/upgrade_guide.rst +++ b/docs/cqlengine/upgrade_guide.rst @@ -8,13 +8,23 @@ conversion to this package will still require certain minimal updates (namely, i **THERE IS ONE FUNCTIONAL CHANGE**, described in the first section below. -Functional Change -================= +Functional Changes +================== +List Prepend Reversing +---------------------- Legacy cqlengine included a workaround for a Cassandra bug in which prepended list segments were reversed (`CASSANDRA-8733 `_). As of this integration, this workaround is removed. The first released integrated version emits a warning when prepend is used. Subsequent versions will have this warning removed. +Date Column Type +---------------- +The Date column type in legacy cqlengine used a ``timestamp`` CQL type and truncated the time. +Going forward, the :class:`~.columns.Date` type represents a ``date`` for Cassandra 2.2+ +(`PYTHON-245 `_). +Users of the legacy functionality should convert models to use :class:`~.columns.DateTime` (which +uses ``timestamp`` internally), and use the build-in ``datetime.date`` for input values. 
+ Remove cqlengine ================ To avoid confusion or mistakes using the legacy package in your application, it From dc433b91e6d03f2cecce36cebd1a50fd8e0fdf3e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 1 Jun 2015 09:23:10 -0500 Subject: [PATCH 1482/3726] Add comment about dual-use socket struct util.sockaddr --- cassandra/util.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/cassandra/util.py b/cassandra/util.py index f188cdcb2f..45ef0e4fe7 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -1024,15 +1024,23 @@ def __str__(self): """ import ctypes - class sockaddr(ctypes.Structure): + """ + Shared struct for ipv4 and ipv6. + + https://msdn.microsoft.com/en-us/library/windows/desktop/ms740496(v=vs.85).aspx + + ``__pad1`` always covers the port. + + When being used for ``sockaddr_in6``, ``ipv4_addr`` actually covers ``sin6_flowinfo``, resulting + in proper alignment for ``ipv6_addr``. + """ _fields_ = [("sa_family", ctypes.c_short), ("__pad1", ctypes.c_ushort), ("ipv4_addr", ctypes.c_byte * 4), ("ipv6_addr", ctypes.c_byte * 16), ("__pad2", ctypes.c_ulong)] - if hasattr(ctypes, 'windll'): WSAStringToAddressA = ctypes.windll.ws2_32.WSAStringToAddressA WSAAddressToStringA = ctypes.windll.ws2_32.WSAAddressToStringA @@ -1043,7 +1051,6 @@ def not_windows(*args): WSAStringToAddressA = not_windows WSAAddressToStringA = not_windows - def inet_pton(address_family, ip_string): if address_family == socket.AF_INET: return socket.inet_aton(ip_string) @@ -1066,7 +1073,6 @@ def inet_pton(address_family, ip_string): raise socket.error('unknown address family') - def inet_ntop(address_family, packed_ip): if address_family == socket.AF_INET: return socket.inet_ntoa(packed_ip) From 6fec9d17a4ab91a15ad6f86ddddd450573f725ae Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 1 Jun 2015 12:52:39 -0500 Subject: [PATCH 1483/3726] Add Cluster.connect_timeout to api doc --- docs/api/cassandra/cluster.rst | 2 ++ 1 file changed, 2 
insertions(+) diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 1e4910cc0c..2b2413354f 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -47,6 +47,8 @@ .. autoattribute:: topology_event_refresh_window + .. autoattribute:: connect_timeout + .. automethod:: connect .. automethod:: shutdown From 2b7d9bc8ab36e0b806476451a40e8b70b7ee0b19 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 1 Jun 2015 13:20:21 -0500 Subject: [PATCH 1484/3726] Add cluster.ResponseFuture.warnings to api doc --- docs/api/cassandra/cluster.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 2b2413354f..195d03a6ef 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -128,6 +128,8 @@ .. autoattribute:: has_more_pages + .. autoattribute:: warnings + .. automethod:: start_fetching_next_page() .. automethod:: add_callback(fn, *args, **kwargs) From a15729c74c2a3982fdccf0501b144a2614e4e173 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 1 Jun 2015 13:20:48 -0500 Subject: [PATCH 1485/3726] cqle: Tighten-up columns class docs with inheritied init --- docs/api/cassandra/cqlengine/columns.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/api/cassandra/cqlengine/columns.rst b/docs/api/cassandra/cqlengine/columns.rst index ae597a55f7..7c4695d323 100644 --- a/docs/api/cassandra/cqlengine/columns.rst +++ b/docs/api/cassandra/cqlengine/columns.rst @@ -70,15 +70,15 @@ Columns of all types are initialized by passing :class:`.Column` attributes to t .. autoclass:: Set -.. autoclass:: SmallInt +.. autoclass:: SmallInt(**kwargs) .. autoclass:: Text -.. autoclass:: Time +.. autoclass:: Time(**kwargs) .. autoclass:: TimeUUID(**kwargs) -.. autoclass:: TinyInt +.. autoclass:: TinyInt(**kwargs) .. 
autoclass:: UserDefinedType From c967f10fc201b44743526455d69c79a1df6284ec Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 1 Jun 2015 13:49:10 -0500 Subject: [PATCH 1486/3726] Update CHANGELOG.rst --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index bfa6776afe..63f4185c30 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,4 +1,4 @@ -2.6.0 +2.6.0rc1 ===== This release adds support for Cassandra 2.2 features, including version From 03c2b2ae1f5abbd8ea0383130ada2db1f40f86c8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 1 Jun 2015 15:44:26 -0500 Subject: [PATCH 1487/3726] Update README.rst --- README.rst | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/README.rst b/README.rst index bdc6dca7eb..261d1e31a9 100644 --- a/README.rst +++ b/README.rst @@ -4,16 +4,26 @@ DataStax Python Driver for Apache Cassandra .. image:: https://travis-ci.org/datastax/python-driver.png?branch=master :target: https://travis-ci.org/datastax/python-driver -A Python client driver for Apache Cassandra. This driver works exclusively -with the Cassandra Query Language v3 (CQL3) and Cassandra's native -protocol. Cassandra versions 1.2 through 2.1 are supported. +A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (1.2+) and DataStax Enterprise (3.1+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. The driver supports Python 2.6, 2.7, 3.3, and 3.4*. -* cqlengine component presently supports Python 2.7+ +\* cqlengine component presently supports Python 2.7+ **Help us focus our efforts!** Provide your input on the `Platform and Runtime Survey `_. 
+Features +-------- +* `Synchronous `_ and `Asynchronous `_ APIs +* `Simple, Prepared, and Batch statements `_ +* Asynchronous IO, parallel execution, request pipelining +* `Connection pooling `_ +* Automatic node discovery +* `Automatic reconnection `_ +* Configurable `load balancing `_ and `retry policies `_ +* `Concurrent execution utilities `_ +* `Object mapper `_ + Installation ------------ Installation through pip is recommended:: From c050a96bc711006ee33cec24ca13c84687a8420b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 1 Jun 2015 15:46:49 -0500 Subject: [PATCH 1488/3726] Update README.rst --- README.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 261d1e31a9..ce818a3b57 100644 --- a/README.rst +++ b/README.rst @@ -10,7 +10,9 @@ The driver supports Python 2.6, 2.7, 3.3, and 3.4*. \* cqlengine component presently supports Python 2.7+ -**Help us focus our efforts!** Provide your input on the `Platform and Runtime Survey `_. +Feedback Requested +------------------ +**Help us focus our efforts!** Provide your input on the `Platform and Runtime Survey `_ (we kept it short). Features -------- From f7fac976d875304b92a9894f7e85a70ffc2f7fbb Mon Sep 17 00:00:00 2001 From: jpuerta Date: Mon, 1 Jun 2015 10:42:47 +0100 Subject: [PATCH 1489/3726] cqle: backport cqlengine to support Python 2.6 https://datastax-oss.atlassian.net/browse/PYTHON-288 Add Python 2.6 compliancy: - Remove dict/set comprehensions. - Use cassandra's OrderedDict. - Replace str.format() implicit placeholders ('{}') with explicit ones ('{0} {1} ...'). - Implement cqlengine.functions.get_total_seconds() as alternative to timedelta.total_seconds(). cqlengine integration tests remain to be backported. 
--- README.rst | 2 +- cassandra/cqlengine/columns.py | 71 ++++++++++++++++--------------- cassandra/cqlengine/functions.py | 22 +++++++--- cassandra/cqlengine/management.py | 48 ++++++++++----------- cassandra/cqlengine/models.py | 38 ++++++++--------- cassandra/cqlengine/named.py | 4 +- cassandra/cqlengine/operators.py | 2 +- cassandra/cqlengine/query.py | 32 +++++++------- cassandra/cqlengine/statements.py | 56 ++++++++++++------------ cassandra/cqlengine/usertype.py | 6 +-- 10 files changed, 145 insertions(+), 136 deletions(-) diff --git a/README.rst b/README.rst index ce818a3b57..110505da97 100644 --- a/README.rst +++ b/README.rst @@ -8,7 +8,7 @@ A modern, `feature-rich `_ a The driver supports Python 2.6, 2.7, 3.3, and 3.4*. -\* cqlengine component presently supports Python 2.7+ +\* cqlengine component presently supports Python 2.6+ Feedback Requested ------------------ diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index d418b11947..b5c49ecea6 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -21,6 +21,7 @@ from cassandra import util from cassandra.cqltypes import DateType, SimpleDateType from cassandra.cqlengine import ValidationError +from cassandra.cqlengine.functions import get_total_seconds log = logging.getLogger(__name__) @@ -186,7 +187,7 @@ def validate(self, value): """ if value is None: if self.required: - raise ValidationError('{} - None values are not allowed'.format(self.column_name or self.db_field)) + raise ValidationError('{0} - None values are not allowed'.format(self.column_name or self.db_field)) return value def to_python(self, value): @@ -228,7 +229,7 @@ def get_column_def(self): Returns a column definition for CQL table definition """ static = "static" if self.static else "" - return '{} {} {}'.format(self.cql, self.db_type, static) + return '{0} {1} {2}'.format(self.cql, self.db_type, static) # TODO: make columns use cqltypes under the hood # until then, this bridges the 
gap in using types along with cassandra.metadata for CQL generation @@ -250,14 +251,14 @@ def db_field_name(self): @property def db_index_name(self): """ Returns the name of the cql index """ - return 'index_{}'.format(self.db_field_name) + return 'index_{0}'.format(self.db_field_name) @property def cql(self): return self.get_cql() def get_cql(self): - return '"{}"'.format(self.db_field_name) + return '"{0}"'.format(self.db_field_name) def _val_is_null(self, val): """ determines if the given value equates to a null value for the given column type """ @@ -323,13 +324,13 @@ def validate(self, value): if value is None: return if not isinstance(value, (six.string_types, bytearray)) and value is not None: - raise ValidationError('{} {} is not a string'.format(self.column_name, type(value))) + raise ValidationError('{0} {1} is not a string'.format(self.column_name, type(value))) if self.max_length: if len(value) > self.max_length: - raise ValidationError('{} is longer than {} characters'.format(self.column_name, self.max_length)) + raise ValidationError('{0} is longer than {1} characters'.format(self.column_name, self.max_length)) if self.min_length: if len(value) < self.min_length: - raise ValidationError('{} is shorter than {} characters'.format(self.column_name, self.min_length)) + raise ValidationError('{0} is shorter than {1} characters'.format(self.column_name, self.min_length)) return value @@ -347,7 +348,7 @@ def validate(self, value): try: return int(val) except (TypeError, ValueError): - raise ValidationError("{} {} can't be converted to integral value".format(self.column_name, value)) + raise ValidationError("{0} {1} can't be converted to integral value".format(self.column_name, value)) def to_python(self, value): return self.validate(value) @@ -399,7 +400,7 @@ def validate(self, value): return int(val) except (TypeError, ValueError): raise ValidationError( - "{} {} can't be converted to integral value".format(self.column_name, value)) + "{0} {1} can't be 
converted to integral value".format(self.column_name, value)) def to_python(self, value): return self.validate(value) @@ -463,11 +464,11 @@ def to_database(self, value): if isinstance(value, date): value = datetime(value.year, value.month, value.day) else: - raise ValidationError("{} '{}' is not a datetime object".format(self.column_name, value)) + raise ValidationError("{0} '{1}' is not a datetime object".format(self.column_name, value)) epoch = datetime(1970, 1, 1, tzinfo=value.tzinfo) - offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 + offset = get_total_seconds(epoch.tzinfo.utcoffset(epoch)) if epoch.tzinfo else 0 - return int(((value - epoch).total_seconds() - offset) * 1000) + return int((get_total_seconds(value - epoch) - offset) * 1000) class Date(Column): @@ -530,7 +531,7 @@ def validate(self, value): except ValueError: # fall-through to error pass - raise ValidationError("{} {} is not a valid uuid".format( + raise ValidationError("{0} {1} is not a valid uuid".format( self.column_name, value)) def to_python(self, value): @@ -561,8 +562,8 @@ def from_datetime(self, dt): global _last_timestamp epoch = datetime(1970, 1, 1, tzinfo=dt.tzinfo) - offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 - timestamp = (dt - epoch).total_seconds() - offset + offset = get_total_seconds(epoch.tzinfo.utcoffset(epoch)) if epoch.tzinfo else 0 + timestamp = get_total_seconds(dt - epoch) - offset node = None clock_seq = None @@ -611,7 +612,7 @@ def validate(self, value): try: return float(value) except (TypeError, ValueError): - raise ValidationError("{} {} is not a valid float".format(self.column_name, value)) + raise ValidationError("{0} {1} is not a valid float".format(self.column_name, value)) def to_python(self, value): return self.validate(value) @@ -660,7 +661,7 @@ def validate(self, value): try: return _Decimal(val) except InvalidOperation: - raise ValidationError("{} '{}' can't be coerced to 
decimal".format(self.column_name, val)) + raise ValidationError("{0} '{1}' can't be coerced to decimal".format(self.column_name, val)) def to_python(self, value): return self.validate(value) @@ -702,7 +703,7 @@ def validate(self, value): # It is dangerous to let collections have more than 65535. # See: https://issues.apache.org/jira/browse/CASSANDRA-5428 if value is not None and len(value) > 65535: - raise ValidationError("{} Collection can't have more than 65535 elements.".format(self.column_name)) + raise ValidationError("{0} Collection can't have more than 65535 elements.".format(self.column_name)) return value def _val_is_null(self, val): @@ -726,7 +727,7 @@ def __init__(self, value_type, strict=True, default=set, **kwargs): type on validation, or raise a validation error, defaults to True """ self.strict = strict - self.db_type = 'set<{}>'.format(value_type.db_type) + self.db_type = 'set<{0}>'.format(value_type.db_type) super(Set, self).__init__(value_type, default=default, **kwargs) def validate(self, value): @@ -736,24 +737,24 @@ def validate(self, value): types = (set,) if self.strict else (set, list, tuple) if not isinstance(val, types): if self.strict: - raise ValidationError('{} {} is not a set object'.format(self.column_name, val)) + raise ValidationError('{0} {1} is not a set object'.format(self.column_name, val)) else: - raise ValidationError('{} {} cannot be coerced to a set object'.format(self.column_name, val)) + raise ValidationError('{0} {1} cannot be coerced to a set object'.format(self.column_name, val)) if None in val: - raise ValidationError("{} None not allowed in a set".format(self.column_name)) + raise ValidationError("{0} None not allowed in a set".format(self.column_name)) - return {self.value_col.validate(v) for v in val} + return set(self.value_col.validate(v) for v in val) def to_python(self, value): if value is None: return set() - return {self.value_col.to_python(v) for v in value} + return set(self.value_col.to_python(v) for v in 
value) def to_database(self, value): if value is None: return None - return {self.value_col.to_database(v) for v in value} + return set(self.value_col.to_database(v) for v in value) class List(BaseContainerColumn): @@ -766,7 +767,7 @@ def __init__(self, value_type, default=list, **kwargs): """ :param value_type: a column class indicating the types of the value """ - self.db_type = 'list<{}>'.format(value_type.db_type) + self.db_type = 'list<{0}>'.format(value_type.db_type) return super(List, self).__init__(value_type=value_type, default=default, **kwargs) def validate(self, value): @@ -774,9 +775,9 @@ def validate(self, value): if val is None: return if not isinstance(val, (set, list, tuple)): - raise ValidationError('{} {} is not a list object'.format(self.column_name, val)) + raise ValidationError('{0} {1} is not a list object'.format(self.column_name, val)) if None in val: - raise ValidationError("{} None is not allowed in a list".format(self.column_name)) + raise ValidationError("{0} None is not allowed in a list".format(self.column_name)) return [self.value_col.validate(v) for v in val] def to_python(self, value): @@ -802,7 +803,7 @@ def __init__(self, key_type, value_type, default=dict, **kwargs): :param value_type: a column class indicating the types of the value """ - self.db_type = 'map<{}, {}>'.format(key_type.db_type, value_type.db_type) + self.db_type = 'map<{0}, {1}>'.format(key_type.db_type, value_type.db_type) inheritance_comparator = issubclass if isinstance(key_type, type) else isinstance if not inheritance_comparator(key_type, Column): @@ -825,21 +826,21 @@ def validate(self, value): if val is None: return if not isinstance(val, dict): - raise ValidationError('{} {} is not a dict object'.format(self.column_name, val)) + raise ValidationError('{0} {1} is not a dict object'.format(self.column_name, val)) if None in val: raise ValidationError("{} None is not allowed in a map".format(self.column_name)) - return {self.key_col.validate(k): 
self.value_col.validate(v) for k, v in val.items()} + return dict((self.key_col.validate(k), self.value_col.validate(v)) for k, v in val.items()) def to_python(self, value): if value is None: return {} if value is not None: - return {self.key_col.to_python(k): self.value_col.to_python(v) for k, v in value.items()} + return dict((self.key_col.to_python(k), self.value_col.to_python(v)) for k, v in value.items()) def to_database(self, value): if value is None: return None - return {self.key_col.to_database(k): self.value_col.to_database(v) for k, v in value.items()} + return dict((self.key_col.to_database(k), self.value_col.to_database(v)) for k, v in value.items()) @property def sub_columns(self): @@ -901,7 +902,7 @@ def __init__(self, model): @property def db_field_name(self): - return 'token({})'.format(', '.join(['"{}"'.format(c.db_field_name) for c in self.partition_columns])) + return 'token({0})'.format(', '.join(['"{0}"'.format(c.db_field_name) for c in self.partition_columns])) def to_database(self, value): from cqlengine.functions import Token @@ -910,4 +911,4 @@ def to_database(self, value): return value def get_cql(self): - return "token({})".format(", ".join(c.cql for c in self.partition_columns)) + return "token({0})".format(", ".join(c.cql for c in self.partition_columns)) diff --git a/cassandra/cqlengine/functions.py b/cassandra/cqlengine/functions.py index 5c6e4c7cd6..43c98afea5 100644 --- a/cassandra/cqlengine/functions.py +++ b/cassandra/cqlengine/functions.py @@ -16,6 +16,14 @@ from cassandra.cqlengine import UnicodeMixin, ValidationError +import sys + +if sys.version_info >= (2, 7): + def get_total_seconds(td): + return td.total_seconds() +else: + def get_total_seconds(td): + return 86400*td.days + td.seconds + td.microseconds/1e6 class QueryValue(UnicodeMixin): """ @@ -23,7 +31,7 @@ class QueryValue(UnicodeMixin): be passed into .filter() keyword args """ - format_string = '%({})s' + format_string = '%({0})s' def __init__(self, value): self.value 
= value @@ -58,7 +66,7 @@ class MinTimeUUID(BaseQueryFunction): http://cassandra.apache.org/doc/cql3/CQL.html#timeuuidFun """ - format_string = 'MinTimeUUID(%({})s)' + format_string = 'MinTimeUUID(%({0})s)' def __init__(self, value): """ @@ -71,8 +79,8 @@ def __init__(self, value): def to_database(self, val): epoch = datetime(1970, 1, 1, tzinfo=val.tzinfo) - offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 - return int(((val - epoch).total_seconds() - offset) * 1000) + offset = get_total_seconds(epoch.tzinfo.utcoffset(epoch)) if epoch.tzinfo else 0 + return int((get_total_seconds(val - epoch) - offset) * 1000) def update_context(self, ctx): ctx[str(self.context_id)] = self.to_database(self.value) @@ -85,7 +93,7 @@ class MaxTimeUUID(BaseQueryFunction): http://cassandra.apache.org/doc/cql3/CQL.html#timeuuidFun """ - format_string = 'MaxTimeUUID(%({})s)' + format_string = 'MaxTimeUUID(%({0})s)' def __init__(self, value): """ @@ -125,8 +133,8 @@ def get_context_size(self): return len(self.value) def __unicode__(self): - token_args = ', '.join('%({})s'.format(self.context_id + i) for i in range(self.get_context_size())) - return "token({})".format(token_args) + token_args = ', '.join('%({0})s'.format(self.context_id + i) for i in range(self.get_context_size())) + return "token({0})".format(token_args) def update_context(self, ctx): for i, (col, val) in enumerate(zip(self._columns, self.value)): diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 5d5e641459..41d714107b 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -79,12 +79,12 @@ def create_keyspace(name, strategy_class, replication_factor, durable_writes=Tru replication_map.pop('replication_factor', None) query = """ - CREATE KEYSPACE {} - WITH REPLICATION = {} + CREATE KEYSPACE {0} + WITH REPLICATION = {1} """.format(metadata.protect_name(name), json.dumps(replication_map).replace('"', "'")) if strategy_class 
!= 'SimpleStrategy': - query += " AND DURABLE_WRITES = {}".format('true' if durable_writes else 'false') + query += " AND DURABLE_WRITES = {0}".format('true' if durable_writes else 'false') execute(query) @@ -163,7 +163,7 @@ def drop_keyspace(name): cluster = get_cluster() if name in cluster.metadata.keyspaces: - execute("DROP KEYSPACE {}".format(metadata.protect_name(name))) + execute("DROP KEYSPACE {0}".format(metadata.protect_name(name))) def sync_table(model): @@ -235,7 +235,7 @@ def sync_table(model): continue # skip columns already defined # add missing column using the column def - query = "ALTER TABLE {} add {}".format(cf_name, col.get_column_def()) + query = "ALTER TABLE {0} add {1}".format(cf_name, col.get_column_def()) execute(query) db_fields_not_in_model = model_fields.symmetric_difference(field_names) @@ -252,9 +252,9 @@ def sync_table(model): if table.columns[column.db_field_name].index: continue - qs = ['CREATE INDEX index_{}_{}'.format(raw_cf_name, column.db_field_name)] - qs += ['ON {}'.format(cf_name)] - qs += ['("{}")'.format(column.db_field_name)] + qs = ['CREATE INDEX index_{0}_{1}'.format(raw_cf_name, column.db_field_name)] + qs += ['ON {0}'.format(cf_name)] + qs += ['("{0}")'.format(column.db_field_name)] qs = ' '.join(qs) execute(qs) @@ -310,7 +310,7 @@ def _sync_type(ks_name, type_model, omit_subtypes=None): for field in type_model._fields.values(): model_fields.add(field.db_field_name) if field.db_field_name not in defined_fields: - execute("ALTER TYPE {} ADD {}".format(type_name_qualified, field.get_column_def())) + execute("ALTER TYPE {0} ADD {1}".format(type_name_qualified, field.get_column_def())) type_model.register_for_keyspace(ks_name) @@ -333,7 +333,7 @@ def get_create_type(type_model, keyspace): def get_create_table(model): cf_name = model.column_family_name() - qs = ['CREATE TABLE {}'.format(cf_name)] + qs = ['CREATE TABLE {0}'.format(cf_name)] # add column types pkeys = [] # primary keys @@ -344,15 +344,15 @@ def 
add_column(col): s = col.get_column_def() if col.primary_key: keys = (pkeys if col.partition_key else ckeys) - keys.append('"{}"'.format(col.db_field_name)) + keys.append('"{0}"'.format(col.db_field_name)) qtypes.append(s) for name, col in model._columns.items(): add_column(col) - qtypes.append('PRIMARY KEY (({}){})'.format(', '.join(pkeys), ckeys and ', ' + ', '.join(ckeys) or '')) + qtypes.append('PRIMARY KEY (({0}){1})'.format(', '.join(pkeys), ckeys and ', ' + ', '.join(ckeys) or '')) - qs += ['({})'.format(', '.join(qtypes))] + qs += ['({0})'.format(', '.join(qtypes))] with_qs = [] @@ -361,25 +361,25 @@ def add_column(col): 'index_interval', 'memtable_flush_period_in_ms', 'populate_io_cache_on_flush', 'read_repair_chance', 'replicate_on_write'] for prop_name in table_properties: - prop_value = getattr(model, '__{}__'.format(prop_name), None) + prop_value = getattr(model, '__{0}__'.format(prop_name), None) if prop_value is not None: # Strings needs to be single quoted if isinstance(prop_value, six.string_types): - prop_value = "'{}'".format(prop_value) - with_qs.append("{} = {}".format(prop_name, prop_value)) + prop_value = "'{0}'".format(prop_value) + with_qs.append("{0} = {1}".format(prop_name, prop_value)) - _order = ['"{}" {}'.format(c.db_field_name, c.clustering_order or 'ASC') for c in model._clustering_keys.values()] + _order = ['"{0}" {1}'.format(c.db_field_name, c.clustering_order or 'ASC') for c in model._clustering_keys.values()] if _order: - with_qs.append('clustering order by ({})'.format(', '.join(_order))) + with_qs.append('clustering order by ({0})'.format(', '.join(_order))) compaction_options = get_compaction_options(model) if compaction_options: compaction_options = json.dumps(compaction_options).replace('"', "'") - with_qs.append("compaction = {}".format(compaction_options)) + with_qs.append("compaction = {0}".format(compaction_options)) # Add table properties. 
if with_qs: - qs += ['WITH {}'.format(' AND '.join(with_qs))] + qs += ['WITH {0}'.format(' AND '.join(with_qs))] qs = ' '.join(qs) return qs @@ -405,10 +405,10 @@ def setter(key, limited_to_strategy=None): :param limited_to_strategy: SizeTieredCompactionStrategy, LeveledCompactionStrategy :return: """ - mkey = "__compaction_{}__".format(key) + mkey = "__compaction_{0}__".format(key) tmp = getattr(model, mkey) if tmp and limited_to_strategy and limited_to_strategy != model.__compaction__: - raise CQLEngineException("{} is limited to {}".format(key, limited_to_strategy)) + raise CQLEngineException("{0} is limited to {1}".format(key, limited_to_strategy)) if tmp: # Explicitly cast the values to strings to be able to compare the @@ -496,7 +496,7 @@ def update_compaction(model): # jsonify options = json.dumps(options).replace('"', "'") cf_name = model.column_family_name() - query = "ALTER TABLE {} with compaction = {}".format(cf_name, options) + query = "ALTER TABLE {0} with compaction = {1}".format(cf_name, options) execute(query) return True @@ -523,7 +523,7 @@ def drop_table(model): try: meta.keyspaces[ks_name].tables[raw_cf_name] - execute('DROP TABLE {};'.format(model.column_family_name())) + execute('DROP TABLE {0};'.format(model.column_family_name())) except KeyError: pass diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 4f6d120250..b0f86d990e 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -280,7 +280,7 @@ def __delete__(self, instance): if self.column.can_delete: instance._values[self.column.column_name].delval() else: - raise AttributeError('cannot delete {} columns'.format(self.column.column_name)) + raise AttributeError('cannot delete {0} columns'.format(self.column.column_name)) class BaseModel(object): @@ -378,8 +378,8 @@ def __init__(self, **values): self._timeout = connection.NOT_SET def __repr__(self): - return '{}({})'.format(self.__class__.__name__, - ', '.join('{}={!r}'.format(k, 
getattr(self, k)) + return '{0}({1})'.format(self.__class__.__name__, + ', '.join('{0}={1!r}'.format(k, getattr(self, k)) for k in self._defined_columns.keys() if k != self._discriminator_column_name)) @@ -387,8 +387,8 @@ def __str__(self): """ Pretty printing of models by their primary key """ - return '{} <{}>'.format(self.__class__.__name__, - ', '.join('{}={}'.format(k, getattr(self, k)) for k in self._primary_keys.keys())) + return '{0} <{1}>'.format(self.__class__.__name__, + ', '.join('{0}={1}'.format(k, getattr(self, k)) for k in self._primary_keys.keys())) @classmethod def _discover_polymorphic_submodels(cls): @@ -433,16 +433,16 @@ def _construct_instance(cls, values): poly_base._discover_polymorphic_submodels() klass = poly_base._get_model_by_discriminator_value(disc_key) if klass is None: - raise PolymorphicModelException( - 'unrecognized discriminator column {} for class {}'.format(disc_key, poly_base.__name__) + raise PolyMorphicModelException( + 'unrecognized polymorphic key {0} for class {1}'.format(poly_key, poly_base.__name__) ) if not issubclass(klass, cls): - raise PolymorphicModelException( - '{} is not a subclass of {}'.format(klass.__name__, cls.__name__) + raise PolyMorphicModelException( + '{0} is not a subclass of {1}'.format(klass.__name__, cls.__name__) ) - field_dict = {k: v for k, v in field_dict.items() if k in klass._columns.keys()} + field_dict = dict((k, v) for (k, v) in field_dict.items() if k in klass._columns.keys()) else: klass = cls @@ -509,7 +509,7 @@ def column_family_name(cls, include_keyspace=True): """ cf_name = protect_name(cls._raw_column_family_name()) if include_keyspace: - return '{}.{}'.format(protect_name(cls._get_keyspace()), cf_name) + return '{0}.{1}'.format(protect_name(cls._get_keyspace()), cf_name) return cf_name @@ -524,7 +524,7 @@ def _raw_column_family_name(cls): cls._table_name = cls._polymorphic_base._raw_column_family_name() else: camelcase = re.compile(r'([a-z])([A-Z])') - ccase = lambda s: 
camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s) + ccase = lambda s: camelcase.sub(lambda v: '{0}_{1}'.format(v.group(1), v.group(2).lower()), s) cf_name = ccase(cls.__name__) # trim to less than 48 characters or cassandra will complain @@ -608,7 +608,7 @@ def create(cls, **kwargs): """ extra_columns = set(kwargs.keys()) - set(cls._columns.keys()) if extra_columns: - raise ValidationError("Incorrect columns passed: {}".format(extra_columns)) + raise ValidationError("Incorrect columns passed: {0}".format(extra_columns)) return cls.objects.create(**kwargs) @classmethod @@ -701,11 +701,11 @@ def update(self, **values): # check for nonexistant columns if col is None: - raise ValidationError("{}.{} has no column named: {}".format(self.__module__, self.__class__.__name__, k)) + raise ValidationError("{0}.{1} has no column named: {2}".format(self.__module__, self.__class__.__name__, k)) # check for primary key update attempts if col.is_primary_key: - raise ValidationError("Cannot apply update to primary key '{}' for {}.{}".format(k, self.__module__, self.__class__.__name__)) + raise ValidationError("Cannot apply update to primary key '{0}' for {1}.{2}".format(k, self.__module__, self.__class__.__name__)) setattr(self, k, v) @@ -808,7 +808,7 @@ def _transform_column(col_name, col_obj): discriminator_columns = [c for c in column_definitions if c[1].discriminator_column] is_polymorphic = len(discriminator_columns) > 0 if len(discriminator_columns) > 1: - raise ModelDefinitionException('only one discriminator_column (polymorphic_key (deprecated)) can be defined in a model, {} found'.format(len(discriminator_columns))) + raise ModelDefinitionException('only one discriminator_column (polymorphic_key (deprecated)) can be defined in a model, {0} found'.format(len(discriminator_columns))) if attrs['__discriminator_value__'] and not is_polymorphic: raise ModelDefinitionException('__discriminator_value__ specified, but no base columns defined with 
discriminator_column=True') @@ -847,7 +847,7 @@ def _get_polymorphic_base(bases): for k, v in column_definitions: # don't allow a column with the same name as a built-in attribute or method if k in BaseModel.__dict__: - raise ModelDefinitionException("column '{}' conflicts with built-in attribute/method".format(k)) + raise ModelDefinitionException("column '{0}' conflicts with built-in attribute/method".format(k)) # counter column primary keys are not allowed if (v.primary_key or v.partition_key) and isinstance(v, (columns.Counter, columns.BaseContainerColumn)): @@ -881,11 +881,11 @@ def _get_polymorphic_base(bases): for v in column_dict.values(): # check for duplicate column names if v.db_field_name in col_names: - raise ModelException("{} defines the column {} more than once".format(name, v.db_field_name)) + raise ModelException("{0} defines the column {1} more than once".format(name, v.db_field_name)) if v.clustering_order and not (v.primary_key and not v.partition_key): raise ModelException("clustering_order may be specified only for clustering primary keys") if v.clustering_order and v.clustering_order.lower() not in ('asc', 'desc'): - raise ModelException("invalid clustering order {} for column {}".format(repr(v.clustering_order), v.db_field_name)) + raise ModelException("invalid clustering order {0} for column {1}".format(repr(v.clustering_order), v.db_field_name)) col_names.add(v.db_field_name) # create db_name -> model name map for loading diff --git a/cassandra/cqlengine/named.py b/cassandra/cqlengine/named.py index b38c07820e..7bc229311d 100644 --- a/cassandra/cqlengine/named.py +++ b/cassandra/cqlengine/named.py @@ -63,7 +63,7 @@ def cql(self): return self.get_cql() def get_cql(self): - return '"{}"'.format(self.name) + return '"{0}"'.format(self.name) def to_database(self, val): return val @@ -97,7 +97,7 @@ def column_family_name(self, include_keyspace=True): otherwise, it creates it from the module and class name """ if include_keyspace: - return 
'{}.{}'.format(self.keyspace, self.name) + return '{0}.{1}'.format(self.keyspace, self.name) else: return self.name diff --git a/cassandra/cqlengine/operators.py b/cassandra/cqlengine/operators.py index 08c8ccb318..5ddbfed83b 100644 --- a/cassandra/cqlengine/operators.py +++ b/cassandra/cqlengine/operators.py @@ -50,7 +50,7 @@ def _recurse(klass): try: return cls.opmap[symbol.upper()] except KeyError: - raise QueryOperatorException("{} doesn't map to a QueryOperator".format(symbol)) + raise QueryOperatorException("{0} doesn't map to a QueryOperator".format(symbol)) class BaseWhereOperator(BaseQueryOperator): diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 2a5c9744dc..19b06552f9 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -174,7 +174,7 @@ def add_callback(self, fn, *args, **kwargs): :param **kwargs: Named arguments to be passed to the callback at the time of execution """ if not callable(fn): - raise ValueError("Value for argument 'fn' is {} and is not a callable object.".format(type(fn))) + raise ValueError("Value for argument 'fn' is {0} and is not a callable object.".format(type(fn))) self._callbacks.append((fn, args, kwargs)) def execute(self): @@ -197,7 +197,7 @@ def execute(self): else: raise ValueError("Batch expects a long, a timedelta, or a datetime") - opener += ' USING TIMESTAMP {}'.format(ts) + opener += ' USING TIMESTAMP {0}'.format(ts) query_list = [opener] parameters = {} @@ -453,7 +453,7 @@ def _parse_filter_arg(self, arg): elif len(statement) == 2: return statement[0], statement[1] else: - raise QueryException("Can't parse '{}'".format(arg)) + raise QueryException("Can't parse '{0}'".format(arg)) def iff(self, *args, **kwargs): """Adds IF statements to queryset""" @@ -463,7 +463,7 @@ def iff(self, *args, **kwargs): clone = copy.deepcopy(self) for operator in args: if not isinstance(operator, TransactionClause): - raise QueryException('{} is not a valid query 
operator'.format(operator)) + raise QueryException('{0} is not a valid query operator'.format(operator)) clone._transaction.append(operator) for col_name, val in kwargs.items(): @@ -476,7 +476,7 @@ def iff(self, *args, **kwargs): raise QueryException("Virtual column 'pk__token' may only be compared to Token() values") column = columns._PartitionKeysToken(self.model) else: - raise QueryException("Can't resolve column name: '{}'".format(col_name)) + raise QueryException("Can't resolve column name: '{0}'".format(col_name)) if isinstance(val, Token): if col_name != 'pk__token': @@ -484,7 +484,7 @@ def iff(self, *args, **kwargs): partition_columns = column.partition_columns if len(partition_columns) != len(val.value): raise QueryException( - 'Token() received {} arguments but model has {} partition keys'.format( + 'Token() received {0} arguments but model has {1} partition keys'.format( len(val.value), len(partition_columns))) val.set_columns(partition_columns) @@ -512,7 +512,7 @@ def filter(self, *args, **kwargs): clone = copy.deepcopy(self) for operator in args: if not isinstance(operator, WhereClause): - raise QueryException('{} is not a valid query operator'.format(operator)) + raise QueryException('{0} is not a valid query operator'.format(operator)) clone._where.append(operator) for arg, val in kwargs.items(): @@ -528,7 +528,7 @@ def filter(self, *args, **kwargs): column = columns._PartitionKeysToken(self.model) quote_field = False else: - raise QueryException("Can't resolve column name: '{}'".format(col_name)) + raise QueryException("Can't resolve column name: '{0}'".format(col_name)) if isinstance(val, Token): if col_name != 'pk__token': @@ -536,7 +536,7 @@ def filter(self, *args, **kwargs): partition_columns = column.partition_columns if len(partition_columns) != len(val.value): raise QueryException( - 'Token() received {} arguments but model has {} partition keys'.format( + 'Token() received {0} arguments but model has {1} partition keys'.format( 
len(val.value), len(partition_columns))) val.set_columns(partition_columns) @@ -580,7 +580,7 @@ def get(self, *args, **kwargs): if len(self._result_cache) == 0: raise self.model.DoesNotExist elif len(self._result_cache) > 1: - raise self.model.MultipleObjectsReturned('{} objects found'.format(len(self._result_cache))) + raise self.model.MultipleObjectsReturned('{0} objects found'.format(len(self._result_cache))) else: return self[0] @@ -628,7 +628,7 @@ class Comment(Model): conditions = [] for colname in colnames: - conditions.append('"{}" {}'.format(*self._get_ordering_condition(colname))) + conditions.append('"{0}" {1}'.format(*self._get_ordering_condition(colname))) clone = copy.deepcopy(self) clone._order.extend(conditions) @@ -689,7 +689,7 @@ def _only_or_defer(self, action, fields): missing_fields = [f for f in fields if f not in self.model._columns.keys()] if missing_fields: raise QueryException( - "Can't resolve fields {} in {}".format( + "Can't resolve fields {0} in {1}".format( ', '.join(missing_fields), self.model.__name__)) if action == 'defer': @@ -821,12 +821,12 @@ def _get_ordering_condition(self, colname): column = self.model._columns.get(colname) if column is None: - raise QueryException("Can't resolve the column name: '{}'".format(colname)) + raise QueryException("Can't resolve the column name: '{0}'".format(colname)) # validate the column selection if not column.primary_key: raise QueryException( - "Can't order on '{}', can only order on (clustered) primary keys".format(colname)) + "Can't order on '{0}', can only order on (clustered) primary keys".format(colname)) pks = [v for k, v in self.model._columns.items() if v.primary_key] if column == pks[0]: @@ -971,10 +971,10 @@ class Row(Model): col = self.model._columns.get(col_name) # check for nonexistant columns if col is None: - raise ValidationError("{}.{} has no column named: {}".format(self.__module__, self.model.__name__, col_name)) + raise ValidationError("{0}.{1} has no column named: 
{2}".format(self.__module__, self.model.__name__, col_name)) # check for primary key update attempts if col.is_primary_key: - raise ValidationError("Cannot apply update to primary key '{}' for {}.{}".format(col_name, self.__module__, self.model.__name__)) + raise ValidationError("Cannot apply update to primary key '{0}' for {1}.{2}".format(col_name, self.__module__, self.model.__name__)) # we should not provide default values in this use case. val = col.validate(val) diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index 866f6f2958..c1a7a51130 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -108,7 +108,7 @@ def __init__(self, field, operator, value, quote_field=True): """ if not isinstance(operator, BaseWhereOperator): raise StatementException( - "operator must be of type {}, got {}".format(BaseWhereOperator, type(operator)) + "operator must be of type {0}, got {1}".format(BaseWhereOperator, type(operator)) ) super(WhereClause, self).__init__(field, value) self.operator = operator @@ -116,8 +116,8 @@ def __init__(self, field, operator, value, quote_field=True): self.quote_field = quote_field def __unicode__(self): - field = ('"{}"' if self.quote_field else '{}').format(self.field) - return u'{} {} {}'.format(field, self.operator, six.text_type(self.query_value)) + field = ('"{0}"' if self.quote_field else '{0}').format(self.field) + return u'{0} {1} {2}'.format(field, self.operator, six.text_type(self.query_value)) def __hash__(self): return super(WhereClause, self).__hash__() ^ hash(self.operator) @@ -145,7 +145,7 @@ class AssignmentClause(BaseClause): """ a single variable st statement """ def __unicode__(self): - return u'"{}" = %({})s'.format(self.field, self.context_id) + return u'"{0}" = %({1})s'.format(self.field, self.context_id) def insert_tuple(self): return self.field, self.context_id @@ -155,7 +155,7 @@ class TransactionClause(BaseClause): """ A single variable iff statement """ 
def __unicode__(self): - return u'"{}" = %({})s'.format(self.field, self.context_id) + return u'"{0}" = %({1})s'.format(self.field, self.context_id) def insert_tuple(self): return self.field, self.context_id @@ -199,9 +199,9 @@ def __unicode__(self): self._assignments is None and self._additions is None and self._removals is None): - qs += ['"{}" = %({})s'.format(self.field, ctx_id)] + qs += ['"{0}" = %({1})s'.format(self.field, ctx_id)] if self._assignments is not None: - qs += ['"{}" = %({})s'.format(self.field, ctx_id)] + qs += ['"{0}" = %({1})s'.format(self.field, ctx_id)] ctx_id += 1 if self._additions is not None: qs += ['"{0}" = "{0}" + %({1})s'.format(self.field, ctx_id)] @@ -270,7 +270,7 @@ def __unicode__(self): qs = [] ctx_id = self.context_id if self._assignments is not None: - qs += ['"{}" = %({})s'.format(self.field, ctx_id)] + qs += ['"{0}" = %({1})s'.format(self.field, ctx_id)] ctx_id += 1 if self._prepend is not None: @@ -398,10 +398,10 @@ def __unicode__(self): ctx_id = self.context_id if self.previous is None and not self._updates: - qs += ['"{}" = %({})s'.format(self.field, ctx_id)] + qs += ['"{0}" = %({1})s'.format(self.field, ctx_id)] else: for _ in self._updates or []: - qs += ['"{}"[%({})s] = %({})s'.format(self.field, ctx_id, ctx_id + 1)] + qs += ['"{0}"[%({1})s] = %({2})s'.format(self.field, ctx_id, ctx_id + 1)] ctx_id += 2 return ', '.join(qs) @@ -436,7 +436,7 @@ def __init__(self, field): super(FieldDeleteClause, self).__init__(field, None) def __unicode__(self): - return '"{}"'.format(self.field) + return '"{0}"'.format(self.field) def update_context(self, ctx): pass @@ -473,7 +473,7 @@ def get_context_size(self): def __unicode__(self): if not self._analyzed: self._analyze() - return ', '.join(['"{}"[%({})s]'.format(self.field, self.context_id + i) for i in range(len(self._removals))]) + return ', '.join(['"{0}"[%({1})s]'.format(self.field, self.context_id + i) for i in range(len(self._removals))]) class BaseCQLStatement(UnicodeMixin): 
@@ -550,7 +550,7 @@ def __repr__(self): @property def _where(self): - return 'WHERE {}'.format(' AND '.join([six.text_type(c) for c in self.where_clauses])) + return 'WHERE {0}'.format(' AND '.join([six.text_type(c) for c in self.where_clauses])) class SelectStatement(BaseCQLStatement): @@ -587,17 +587,17 @@ def __unicode__(self): if self.count: qs += ['COUNT(*)'] else: - qs += [', '.join(['"{}"'.format(f) for f in self.fields]) if self.fields else '*'] + qs += [', '.join(['"{0}"'.format(f) for f in self.fields]) if self.fields else '*'] qs += ['FROM', self.table] if self.where_clauses: qs += [self._where] if self.order_by and not self.count: - qs += ['ORDER BY {}'.format(', '.join(six.text_type(o) for o in self.order_by))] + qs += ['ORDER BY {0}'.format(', '.join(six.text_type(o) for o in self.order_by))] if self.limit: - qs += ['LIMIT {}'.format(self.limit)] + qs += ['LIMIT {0}'.format(self.limit)] if self.allow_filtering: qs += ['ALLOW FILTERING'] @@ -681,24 +681,24 @@ def add_where_clause(self, clause): raise StatementException("Cannot add where clauses to insert statements") def __unicode__(self): - qs = ['INSERT INTO {}'.format(self.table)] + qs = ['INSERT INTO {0}'.format(self.table)] # get column names and context placeholders fields = [a.insert_tuple() for a in self.assignments] columns, values = zip(*fields) - qs += ["({})".format(', '.join(['"{}"'.format(c) for c in columns]))] + qs += ["({0})".format(', '.join(['"{0}"'.format(c) for c in columns]))] qs += ['VALUES'] - qs += ["({})".format(', '.join(['%({})s'.format(v) for v in values]))] + qs += ["({0})".format(', '.join(['%({0})s'.format(v) for v in values]))] if self.if_not_exists: qs += ["IF NOT EXISTS"] if self.ttl: - qs += ["USING TTL {}".format(self.ttl)] + qs += ["USING TTL {0}".format(self.ttl)] if self.timestamp: - qs += ["USING TIMESTAMP {}".format(self.timestamp_normalized)] + qs += ["USING TIMESTAMP {0}".format(self.timestamp_normalized)] return ' '.join(qs) @@ -732,13 +732,13 @@ def 
__unicode__(self): using_options = [] if self.ttl: - using_options += ["TTL {}".format(self.ttl)] + using_options += ["TTL {0}".format(self.ttl)] if self.timestamp: - using_options += ["TIMESTAMP {}".format(self.timestamp_normalized)] + using_options += ["TIMESTAMP {0}".format(self.timestamp_normalized)] if using_options: - qs += ["USING {}".format(" AND ".join(using_options))] + qs += ["USING {0}".format(" AND ".join(using_options))] qs += ['SET'] qs += [', '.join([six.text_type(c) for c in self.assignments])] @@ -771,7 +771,7 @@ def get_context(self): return ctx def _get_transactions(self): - return 'IF {}'.format(' AND '.join([six.text_type(c) for c in self.transactions])) + return 'IF {0}'.format(' AND '.join([six.text_type(c) for c in self.transactions])) def update_context_id(self, i): super(UpdateStatement, self).update_context_id(i) @@ -820,16 +820,16 @@ def add_field(self, field): def __unicode__(self): qs = ['DELETE'] if self.fields: - qs += [', '.join(['{}'.format(f) for f in self.fields])] + qs += [', '.join(['{0}'.format(f) for f in self.fields])] qs += ['FROM', self.table] delete_option = [] if self.timestamp: - delete_option += ["TIMESTAMP {}".format(self.timestamp_normalized)] + delete_option += ["TIMESTAMP {0}".format(self.timestamp_normalized)] if delete_option: - qs += [" USING {} ".format(" AND ".join(delete_option))] + qs += [" USING {0} ".format(" AND ".join(delete_option))] if self.where_clauses: qs += [self._where] diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index 1e30fc829c..88ec033ba8 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -56,7 +56,7 @@ def __ne__(self, other): return not self.__eq__(other) def __str__(self): - return "{{{}}}".format(', '.join("'{}': {}".format(k, getattr(self, k)) for k, v in six.iteritems(self._values))) + return "{{{0}}}".format(', '.join("'{0}': {1}".format(k, getattr(self, k)) for k, v in six.iteritems(self._values))) def 
has_changed_fields(self): return any(v.changed for v in self._values.values()) @@ -116,7 +116,7 @@ def type_name(cls): type_name = cls.__type_name__.lower() else: camelcase = re.compile(r'([a-z])([A-Z])') - ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2)), s) + ccase = lambda s: camelcase.sub(lambda v: '{0}_{1}'.format(v.group(1), v.group(2)), s) type_name = ccase(cls.__name__) # trim to less than 48 characters or cassandra will complain @@ -157,7 +157,7 @@ def _transform_column(field_name, field_obj): for k, v in field_defs: # don't allow a field with the same name as a built-in attribute or method if k in BaseUserType.__dict__: - raise UserTypeDefinitionException("field '{}' conflicts with built-in attribute/method".format(k)) + raise UserTypeDefinitionException("field '{0}' conflicts with built-in attribute/method".format(k)) _transform_column(k, v) # create db_name -> model name map for loading From 8bfcfb8824ce46f0d1cfc2735f70d916cabceb81 Mon Sep 17 00:00:00 2001 From: jpuerta Date: Mon, 1 Jun 2015 11:33:43 +0100 Subject: [PATCH 1490/3726] cqle: Fix minor issues added in previous commit Changes: - Extra whitespace added during merge - Replace (k, v) syntax in dict comprehension with k, v - Discriminator column for polymorphic key --- cassandra/cqlengine/columns.py | 2 +- cassandra/cqlengine/models.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index b5c49ecea6..bba3232c6f 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -563,7 +563,7 @@ def from_datetime(self, dt): epoch = datetime(1970, 1, 1, tzinfo=dt.tzinfo) offset = get_total_seconds(epoch.tzinfo.utcoffset(epoch)) if epoch.tzinfo else 0 - timestamp = get_total_seconds(dt - epoch) - offset + timestamp = get_total_seconds(dt - epoch) - offset node = None clock_seq = None diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 
b0f86d990e..58fc8a94e3 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -434,7 +434,7 @@ def _construct_instance(cls, values): klass = poly_base._get_model_by_discriminator_value(disc_key) if klass is None: raise PolyMorphicModelException( - 'unrecognized polymorphic key {0} for class {1}'.format(poly_key, poly_base.__name__) + 'unrecognized discriminator column {0} for class {1}'.format(poly_key, poly_base.__name__) ) if not issubclass(klass, cls): @@ -442,7 +442,7 @@ def _construct_instance(cls, values): '{0} is not a subclass of {1}'.format(klass.__name__, cls.__name__) ) - field_dict = dict((k, v) for (k, v) in field_dict.items() if k in klass._columns.keys()) + field_dict = dict((k, v) for k, v in field_dict.items() if k in klass._columns.keys()) else: klass = cls From be0d0037ea0227897e665be1f522ab66e20ad084 Mon Sep 17 00:00:00 2001 From: jpuerta Date: Tue, 2 Jun 2015 16:29:12 +0100 Subject: [PATCH 1491/3726] cqle: Fix minor issues with polymorphic models --- cassandra/cqlengine/models.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index 58fc8a94e3..ddb7945995 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -433,12 +433,12 @@ def _construct_instance(cls, values): poly_base._discover_polymorphic_submodels() klass = poly_base._get_model_by_discriminator_value(disc_key) if klass is None: - raise PolyMorphicModelException( - 'unrecognized discriminator column {0} for class {1}'.format(poly_key, poly_base.__name__) + raise PolymorphicModelException( + 'unrecognized discriminator column {0} for class {1}'.format(disc_key, poly_base.__name__) ) if not issubclass(klass, cls): - raise PolyMorphicModelException( + raise PolymorphicModelException( '{0} is not a subclass of {1}'.format(klass.__name__, cls.__name__) ) From 8949200222343407ba6201a59f10169f089be2de Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 
2 Jun 2015 12:11:32 -0500 Subject: [PATCH 1492/3726] Update custom payload value encoding for CASSADNRA-9515 https://issues.apache.org/jira/browse/CASSANDRA-9515 --- cassandra/protocol.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 1c851f8c5d..20904ecec3 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -1019,11 +1019,6 @@ def read_binary_string(f): return contents -def write_binary_string(f, s): - write_short(f, len(s)) - f.write(s) - - def write_string(f, s): if isinstance(s, six.text_type): s = s.encode('utf8') @@ -1080,7 +1075,7 @@ def read_bytesmap(f): bytesmap = {} for _ in range(numpairs): k = read_string(f) - bytesmap[k] = read_binary_string(f) + bytesmap[k] = read_value(f) return bytesmap @@ -1088,7 +1083,7 @@ def write_bytesmap(f, bytesmap): write_short(f, len(bytesmap)) for k, v in bytesmap.items(): write_string(f, k) - write_binary_string(f, v) + write_value(f, v) def read_stringmultimap(f): From ec241de0feb2c888a59b7c3a935f0562d8084716 Mon Sep 17 00:00:00 2001 From: Ashwin Rajeev Date: Thu, 4 Jun 2015 12:52:38 +0530 Subject: [PATCH 1493/3726] fix unicode encoding issue for query.bind_params --- cassandra/cluster.py | 2 -- cassandra/query.py | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cb2502e7d2..2708736089 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1653,8 +1653,6 @@ def _create_response_future(self, query, parameters, trace, custom_payload): if isinstance(query, SimpleStatement): query_string = query.query_string - if six.PY2 and isinstance(query_string, six.text_type): - query_string = query_string.encode('utf-8') if parameters: query_string = bind_params(query_string, parameters, self.encoder) message = QueryMessage( diff --git a/cassandra/query.py b/cassandra/query.py index 6709cbeff6..19d227beb8 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ 
-793,6 +793,8 @@ def __str__(self): def bind_params(query, params, encoder): + if six.PY2 and isinstance(query, six.text_type): + query = query.encode('utf-8') if isinstance(params, dict): return query % dict((k, encoder.cql_encode_all_types(v)) for k, v in six.iteritems(params)) else: From cf1432683265bc2e232ec8f060171be7ef0bc1b3 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 4 Jun 2015 09:06:58 -0500 Subject: [PATCH 1494/3726] Finalize changelog for 2.6.0c1 --- CHANGELOG.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 63f4185c30..0ba2a4ac6c 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,5 +1,6 @@ 2.6.0rc1 ===== +June 4, 2015 This release adds support for Cassandra 2.2 features, including version 4 of the native protocol. @@ -15,7 +16,7 @@ Features * Support new types in C* 2.2: date, time, smallint, tinyint (PYTHON-245, 295) * cqle: add Double column type and remove Float overload (PYTHON-246) * Use partition key column information in prepared response for protocol v4+ (PYTHON-277) -* Support message custom payloads in protocol v4+ (PYTHON-280) +* Support message custom payloads in protocol v4+ (PYTHON-280, PYTHON-329) * Deprecate refresh_schema and replace with functions for specific entities (PYTHON-291) * Save trace id even when trace complete times out (PYTHON-302) * Warn when registering client UDT class for protocol < v3 (PYTHON-305) From 0dc0d584de0a8201ea249572cbf33a6b589e7fdc Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 4 Jun 2015 09:08:33 -0500 Subject: [PATCH 1495/3726] 2.6.0c1 version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 3f61909664..379f6d4534 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (2, 5, 1, 'post') +__version_info__ = 
(2, 6, '0c1') __version__ = '.'.join(map(str, __version_info__)) From db9bf9b7973441282217a03001dbbcb8b6c0d8a5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 4 Jun 2015 09:57:51 -0500 Subject: [PATCH 1496/3726] Fix cassandra.protocol API docs --- docs/api/cassandra/protocol.rst | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/api/cassandra/protocol.rst b/docs/api/cassandra/protocol.rst index a6b4d1c41a..52f1287a6c 100644 --- a/docs/api/cassandra/protocol.rst +++ b/docs/api/cassandra/protocol.rst @@ -1,7 +1,12 @@ +``cassandra.protocol`` - Protocol Features +===================================================================== + +.. module:: cassandra.protocol + .. _custom_payload: -Custom Payload -============== +Custom Payloads +--------------- Native protocol version 4+ allows for a custom payload to be sent between clients and custom query handlers. The payload is specified as a string:binary_type dict holding custom key/value pairs. @@ -9,3 +14,4 @@ holding custom key/value pairs. By default these are ignored by the server. They can be useful for servers implementing a custom QueryHandler. +See :meth:`.Session.execute`, ::meth:`.Session.execute_async`, :attr:`.ResponseFuture.custom_payload`. 
From 48ec3ebee2bfecefb1689f1864da063dd115128c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 4 Jun 2015 11:35:43 -0500 Subject: [PATCH 1497/3726] Correct release version in changelog --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 0ba2a4ac6c..41b79b7e54 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,4 +1,4 @@ -2.6.0rc1 +2.6.0c1 ===== June 4, 2015 From f5c8aa952255c6196c6acfeec448889915305172 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 4 Jun 2015 11:59:13 -0500 Subject: [PATCH 1498/3726] Post release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 379f6d4534..059aeb00eb 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (2, 6, '0c1') +__version_info__ = (2, 6, '0c1', 'post') __version__ = '.'.join(map(str, __version_info__)) From be0333626bf1c20a0ea85f359035d69b59efca2d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 4 Jun 2015 13:04:17 -0500 Subject: [PATCH 1499/3726] Add custom_payload to Session.execute doc annotation --- docs/api/cassandra/cluster.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 195d03a6ef..3edc22c82a 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -106,9 +106,9 @@ .. autoattribute:: encoder - .. automethod:: execute(statement[, parameters][, timeout][, trace]) + .. automethod:: execute(statement[, parameters][, timeout][, trace][, custom_payload]) - .. automethod:: execute_async(statement[, parameters][, trace]) + .. automethod:: execute_async(statement[, parameters][, trace][, custom_payload]) .. 
automethod:: prepare(statement) From 088a546e5281096a9628b07ec2a5602793bc5db2 Mon Sep 17 00:00:00 2001 From: Ricardo Sancho Date: Sat, 6 Jun 2015 11:41:31 +0100 Subject: [PATCH 1500/3726] Write timeouts being ignored with 0 responses DowngradingConsistencyRetryPolicy was ignoring write timeouts for WriteTypes SIMPLE, BATCH and COUNTER even if responses received was equal to 0. It is valid to ignore the failure only if at least one response was received. --- cassandra/policies.py | 2 +- tests/unit/test_policies.py | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index d4d8914c8b..6d6c23b972 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -816,7 +816,7 @@ def on_write_timeout(self, query, consistency, write_type, required_responses, received_responses, retry_num): if retry_num != 0: return (self.RETHROW, None) - elif write_type in (WriteType.SIMPLE, WriteType.BATCH, WriteType.COUNTER): + elif write_type in (WriteType.SIMPLE, WriteType.BATCH, WriteType.COUNTER) and received_responses > 0: return (self.IGNORE, None) elif write_type == WriteType.UNLOGGED_BATCH: return self._pick_consistency(received_responses) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 4f865fb1ee..bd8541047f 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -1052,6 +1052,14 @@ def test_write_timeout(self): self.assertEqual(retry, RetryPolicy.RETHROW) self.assertEqual(consistency, None) + # On these type of writes failures should not be ignored + # if received_responses is 0 + for write_type in (WriteType.SIMPLE, WriteType.BATCH, WriteType.COUNTER): + retry, consistency = policy.on_write_timeout( + query=None, consistency=ONE, write_type=write_type, + required_responses=1, received_responses=0, retry_num=0) + self.assertEqual(retry, RetryPolicy.RETHROW) + # ignore failures on these types of writes for write_type in (WriteType.SIMPLE, WriteType.BATCH, 
WriteType.COUNTER): retry, consistency = policy.on_write_timeout( From b48944ec61a83b4a914da803c187a526cf8a7982 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 8 Jun 2015 08:54:09 -0500 Subject: [PATCH 1501/3726] Remove warning on missing blist Warning is not required, especially since benchmarks show the pure Python impl is faster for most core use cases. --- cassandra/util.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/cassandra/util.py b/cassandra/util.py index 45ef0e4fe7..ba49b5687e 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -480,13 +480,6 @@ def isdisjoint(self, other): from blist import sortedset except ImportError: - import warnings - - warnings.warn( - "The blist library is not available, so a pure python list-based set will " - "be used in place of blist.sortedset for set collection values. " - "You can find the blist library here: https://pypi.python.org/pypi/blist/") - from bisect import bisect_left class sortedset(object): From 015a0e18efef8a06d218028c189d373dcf52b3c1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 8 Jun 2015 17:45:26 -0500 Subject: [PATCH 1502/3726] Automatically downgrading protocol version on control connection PYTHON-240 --- cassandra/cluster.py | 39 +++++-- cassandra/connection.py | 146 +++++++++++++------------- cassandra/io/asyncorereactor.py | 1 - cassandra/io/eventletreactor.py | 1 - cassandra/io/geventreactor.py | 1 - cassandra/io/libevreactor.py | 1 - cassandra/io/twistedreactor.py | 1 - cassandra/protocol.py | 6 +- tests/unit/io/test_asyncorereactor.py | 23 ++-- tests/unit/io/test_libevreactor.py | 25 +++-- tests/unit/io/test_twistedreactor.py | 18 ++-- tests/unit/test_connection.py | 81 ++++++-------- 12 files changed, 178 insertions(+), 165 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cb2502e7d2..bd3eefeea7 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -47,7 +47,7 @@ InvalidRequest, OperationTimedOut, UnsupportedOperation, 
Unauthorized) from cassandra.connection import (ConnectionException, ConnectionShutdown, - ConnectionHeartbeat) + ConnectionHeartbeat, ProtocolVersionUnsupported) from cassandra.cqltypes import UserType from cassandra.encoder import Encoder from cassandra.protocol import (QueryMessage, ResultMessage, @@ -60,7 +60,7 @@ IsBootstrappingErrorMessage, BatchMessage, RESULT_KIND_PREPARED, RESULT_KIND_SET_KEYSPACE, RESULT_KIND_ROWS, - RESULT_KIND_SCHEMA_CHANGE) + RESULT_KIND_SCHEMA_CHANGE, MIN_SUPPORTED_VERSION) from cassandra.metadata import Metadata, protect_name from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy, ExponentialReconnectionPolicy, HostDistance, @@ -220,9 +220,14 @@ class Cluster(object): server will be automatically used. """ - protocol_version = 2 + protocol_version = 4 """ - The version of the native protocol to use. + The maximum version of the native protocol to use. + + The driver will automatically downgrade version based on a negotiation with + the server, but it is most efficient to set this to the maximum supported + by your version of Cassandra. Setting this will also prevent conflicting + versions negotiated if your cluster is upgraded. Version 2 of the native protocol adds support for lightweight transactions, batch operations, and automatic query paging. The v2 protocol is @@ -233,6 +238,10 @@ class Cluster(object): serial consistency levels for :class:`~.BatchStatement`, and an improved connection pool. + Version 4 of the native protocol adds a number of new types, server warnings, + new failure messages, and custom payloads. 
Details in the + `project docs `_ + The following table describes the native protocol versions that are supported by each version of Cassandra: @@ -245,6 +254,8 @@ class Cluster(object): +-------------------+-------------------+ | 2.1 | 1, 2, 3 | +-------------------+-------------------+ + | 2.2 | 1, 2, 3, 4 | + +-------------------+-------------------+ """ compression = True @@ -495,7 +506,7 @@ def __init__(self, ssl_options=None, sockopts=None, cql_version=None, - protocol_version=2, + protocol_version=4, executor_threads=2, max_schema_agreement_wait=10, control_connection_timeout=2.0, @@ -788,6 +799,15 @@ def _make_connection_kwargs(self, address, kwargs_dict): return kwargs_dict + def protocol_downgrade(self, host_addr, previous_version): + new_version = previous_version - 1 + if new_version < self.protocol_version: + if new_version >= MIN_SUPPORTED_VERSION: + log.warning("Downgrading core protocol version from %d to %d for %s", self.protocol_version, new_version, host_addr) + self.protocol_version = new_version + else: + raise Exception("Cannot downgrade protocol version (%d) below minimum supported version: %d" % (new_version, MIN_SUPPORTED_VERSION)) + def connect(self, keyspace=None): """ Creates and returns a new :class:`~.Session` object. If `keyspace` @@ -1503,7 +1523,6 @@ class Session(object): _pools = None _load_balancer = None _metrics = None - _protocol_version = None def __init__(self, cluster, hosts): self.cluster = cluster @@ -2094,7 +2113,13 @@ def _try_connect(self, host): node/token and schema metadata. 
""" log.debug("[control connection] Opening new connection to %s", host) - connection = self._cluster.connection_factory(host.address, is_control_connection=True) + + while True: + try: + connection = self._cluster.connection_factory(host.address, is_control_connection=True) + break + except ProtocolVersionUnsupported as e: + self._cluster.protocol_downgrade(host.address, e.startup_version) log.debug("[control connection] Established new connection %r, " "registering watchers and refreshing schema and topology", diff --git a/cassandra/connection.py b/cassandra/connection.py index c239313c45..bf0e5f0f69 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -13,12 +13,12 @@ # limitations under the License. from __future__ import absolute_import # to enable import io from stdlib -from collections import defaultdict, deque +from collections import defaultdict, deque, namedtuple import errno from functools import wraps, partial import io import logging -import os +import struct import sys from threading import Thread, Event, RLock import time @@ -32,13 +32,13 @@ from six.moves import range from cassandra import ConsistencyLevel, AuthenticationFailed, OperationTimedOut -from cassandra.marshal import int32_pack, header_unpack, v3_header_unpack, int32_unpack +from cassandra.marshal import int32_pack, uint8_unpack from cassandra.protocol import (ReadyMessage, AuthenticateMessage, OptionsMessage, StartupMessage, ErrorMessage, CredentialsMessage, QueryMessage, ResultMessage, decode_response, InvalidRequestException, SupportedMessage, AuthResponseMessage, AuthChallengeMessage, - AuthSuccessMessage, ProtocolException) + AuthSuccessMessage, ProtocolException, MAX_SUPPORTED_VERSION) from cassandra.util import OrderedDict @@ -88,6 +88,11 @@ def decompress(byts): HEADER_DIRECTION_TO_CLIENT = 0x80 HEADER_DIRECTION_MASK = 0x80 +frame_header_v1_v2 = struct.Struct('>BbBi') +frame_header_v3 = struct.Struct('>BhBi') + +_Frame = namedtuple('Frame', ('version', 'flags', 
'stream', 'opcode', 'body_offset', 'end_pos')) + NONBLOCKING = (errno.EAGAIN, errno.EWOULDBLOCK) @@ -108,6 +113,14 @@ class ConnectionShutdown(ConnectionException): """ pass +class ProtocolVersionUnsupported(ConnectionException): + """ + Server rejected startup message due to unsupported protocol version + """ + def __init__(self, host, startup_version): + super(ProtocolVersionUnsupported, self).__init__("Unsupported protocol version on %s: %d", + (host, startup_version)) + self.startup_version = startup_version class ConnectionBusy(Exception): """ @@ -144,7 +157,7 @@ class Connection(object): out_buffer_size = 4096 cql_version = None - protocol_version = 2 + protocol_version = MAX_SUPPORTED_VERSION keyspace = None compression = True @@ -174,12 +187,15 @@ class Connection(object): msg_received = False + is_unsupported_proto_version = False + is_control_connection = False _iobuf = None + _current_frame = None def __init__(self, host='127.0.0.1', port=9042, authenticator=None, ssl_options=None, sockopts=None, compression=True, - cql_version=None, protocol_version=2, is_control_connection=False, + cql_version=None, protocol_version=MAX_SUPPORTED_VERSION, is_control_connection=False, user_type_map=None): self.host = host self.port = port @@ -193,34 +209,18 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self.user_type_map = user_type_map self._push_watchers = defaultdict(set) self._iobuf = io.BytesIO() + if protocol_version >= 3: - self._header_unpack = v3_header_unpack - self._header_length = 5 self.max_request_id = (2 ** 15) - 1 # Don't fill the deque with 2**15 items right away. Start with 300 and add # more if needed. 
self.request_ids = deque(range(300)) self.highest_request_id = 299 else: - self._header_unpack = header_unpack - self._header_length = 4 self.max_request_id = (2 ** 7) - 1 self.request_ids = deque(range(self.max_request_id + 1)) self.highest_request_id = self.max_request_id - # 0 8 16 24 32 40 - # +---------+---------+---------+---------+---------+ - # | version | flags | stream | opcode | - # +---------+---------+---------+---------+---------+ - # | length | - # +---------+---------+---------+---------+ - # | | - # . ... body ... . - # . . - # . . - # +---------------------------------------- - self._full_header_length = self._header_length + 4 - self.lock = RLock() @classmethod @@ -249,6 +249,8 @@ def factory(cls, host, timeout, *args, **kwargs): conn = cls(host, *args, **kwargs) conn.connected_event.wait(timeout) if conn.last_error: + if conn.is_unsupported_proto_version: + raise ProtocolVersionUnsupported(host, conn.protocol_version) raise conn.last_error elif not conn.connected_event.is_set(): conn.close() @@ -377,42 +379,56 @@ def control_conn_disposed(self): self.is_control_connection = False self._push_watchers = {} + @defunct_on_error + def _read_frame_header(self): + buf = self._iobuf + pos = buf.tell() + if pos: + buf.seek(0) + version = uint8_unpack(buf.read(1)) & PROTOCOL_VERSION_MASK + if version > MAX_SUPPORTED_VERSION: + raise ProtocolError("This version of the driver does not support protocol version %d" % version) + frame_header = frame_header_v3 if version >= 3 else frame_header_v1_v2 + # this frame header struct is everything after the version byte + header_size = frame_header.size + 1 + if pos >= header_size: + flags, stream, op, body_len = frame_header.unpack(buf.read(frame_header.size)) + if body_len < 0: + raise ProtocolError("Received negative body length: %r" % body_len) + self._current_frame = _Frame(version, flags, stream, op, header_size, body_len + header_size) + + self._iobuf.seek(pos) + + return pos + + def _reset_frame(self): + 
leftover = self._iobuf.read() + self._iobuf = io.BytesIO() + self._iobuf.write(leftover) + self._current_frame = None + def process_io_buffer(self): while True: - pos = self._iobuf.tell() - if pos < self._full_header_length or (self._total_reqd_bytes > 0 and pos < self._total_reqd_bytes): + if not self._current_frame: + pos = self._read_frame_header() + else: + pos = self._iobuf.tell() + + if not self._current_frame or pos < self._current_frame.end_pos: # we don't have a complete header yet or we # already saw a header, but we don't have a # complete message yet return else: - # have enough for header, read body len from header - self._iobuf.seek(self._header_length) - body_len = int32_unpack(self._iobuf.read(4)) - - # seek to end to get length of current buffer - self._iobuf.seek(0, os.SEEK_END) - pos = self._iobuf.tell() - - if pos >= body_len + self._full_header_length: - # read message header and body - self._iobuf.seek(0) - msg = self._iobuf.read(self._full_header_length + body_len) - - # leave leftover in current buffer - leftover = self._iobuf.read() - self._iobuf = io.BytesIO() - self._iobuf.write(leftover) - - self._total_reqd_bytes = 0 - self.process_msg(msg, body_len) - else: - self._total_reqd_bytes = body_len + self._full_header_length - return + frame = self._current_frame + self._iobuf.seek(frame.body_offset) + msg = self._iobuf.read(frame.end_pos - frame.body_offset) + self.process_msg(self._current_frame, msg) + self._reset_frame() @defunct_on_error - def process_msg(self, msg, body_len): - version, flags, stream_id, opcode = self._header_unpack(msg[:self._header_length]) + def process_msg(self, header, body): + stream_id = header.stream if stream_id < 0: callback = None else: @@ -422,33 +438,12 @@ def process_msg(self, msg, body_len): self.msg_received = True - body = None try: - # check that the protocol version is supported - given_version = version & PROTOCOL_VERSION_MASK - if given_version != self.protocol_version: - msg = "Server protocol 
version (%d) does not match the specified driver protocol version (%d). " +\ - "Consider setting Cluster.protocol_version to %d." - raise ProtocolError(msg % (given_version, self.protocol_version, given_version)) - - # check that the header direction is correct - if version & HEADER_DIRECTION_MASK != HEADER_DIRECTION_TO_CLIENT: - raise ProtocolError( - "Header direction in response is incorrect; opcode %04x, stream id %r" - % (opcode, stream_id)) - - if body_len > 0: - body = msg[self._full_header_length:] - elif body_len == 0: - body = six.binary_type() - else: - raise ProtocolError("Got negative body length: %r" % body_len) - - response = decode_response(given_version, self.user_type_map, stream_id, - flags, opcode, body, self.decompressor) + response = decode_response(header.version, self.user_type_map, stream_id, + header.flags, header.opcode, body, self.decompressor) except Exception as exc: log.exception("Error decoding response from Cassandra. " - "opcode: %04x; message contents: %r", opcode, msg) + "opcode: %04x; message contents: %r", header.opcode, body) if callback is not None: callback(exc) self.defunct(exc) @@ -457,6 +452,9 @@ def process_msg(self, msg, body_len): try: if stream_id >= 0: if isinstance(response, ProtocolException): + if 'unsupported protocol version' in response.message: + self.is_unsupported_proto_version = True + log.error("Closing connection %s due to protocol error: %s", self, response.summary_msg()) self.defunct(response) if callback is not None: diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index ef687c388c..e5db1c74cc 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -135,7 +135,6 @@ class AsyncoreConnection(Connection, asyncore.dispatcher): _loop = None - _total_reqd_bytes = 0 _writable = False _readable = False diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index 670d0f1865..f123e0bd68 100644 --- 
a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -48,7 +48,6 @@ class EventletConnection(Connection): An implementation of :class:`.Connection` that utilizes ``eventlet``. """ - _total_reqd_bytes = 0 _read_watcher = None _write_watcher = None _socket = None diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 6e9af0da4d..cdf1b25451 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -45,7 +45,6 @@ class GeventConnection(Connection): An implementation of :class:`.Connection` that utilizes ``gevent``. """ - _total_reqd_bytes = 0 _read_watcher = None _write_watcher = None _socket = None diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 93b4c97854..ca27bee800 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -215,7 +215,6 @@ class LibevConnection(Connection): """ _libevloop = None _write_watcher_is_active = False - _total_reqd_bytes = 0 _read_watcher = None _write_watcher = None _socket = None diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index ff81e5613f..425c96ce00 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -141,7 +141,6 @@ class TwistedConnection(Connection): """ _loop = None - _total_reqd_bytes = 0 @classmethod def initialize_reactor(cls): diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 20904ecec3..9b544236ec 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -53,7 +53,9 @@ class InternalError(Exception): ColumnMetadata = namedtuple("ColumnMetadata", ['keyspace_name', 'table_name', 'name', 'type']) -HEADER_DIRECTION_FROM_CLIENT = 0x00 +MIN_SUPPORTED_VERSION = 1 +MAX_SUPPORTED_VERSION = 4 + HEADER_DIRECTION_TO_CLIENT = 0x80 HEADER_DIRECTION_MASK = 0x80 @@ -967,7 +969,7 @@ def write_header(f, version, flags, stream_id, opcode, length): Write a CQL protocol frame header. 
""" pack = v3_header_pack if version >= 3 else header_pack - f.write(pack(version | HEADER_DIRECTION_FROM_CLIENT, flags, stream_id, opcode)) + f.write(pack(version, flags, stream_id, opcode)) write_int(f, length) diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 17e42a0221..cdc61ec2d2 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -20,17 +20,15 @@ import unittest # noqa import errno +import math +from mock import patch, Mock import os - from six import BytesIO - import socket from socket import error as socket_error -from mock import patch, Mock - from cassandra.connection import (HEADER_DIRECTION_TO_CLIENT, - ConnectionException) + ConnectionException, ProtocolError) from cassandra.io.asyncorereactor import AsyncoreConnection from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage, ReadyMessage, ServerError) @@ -132,7 +130,7 @@ def side_effect(*args): c.socket.recv.side_effect = side_effect c.handle_read() - self.assertEqual(c._total_reqd_bytes, 20000 + len(header)) + self.assertEqual(c._current_frame.end_pos, 20000 + len(header)) # the EAGAIN prevents it from reading the last 100 bytes c._iobuf.seek(0, os.SEEK_END) pos = c._iobuf.tell() @@ -159,7 +157,7 @@ def test_protocol_error(self, *args): # make sure it errored correctly self.assertTrue(c.is_defunct) self.assertTrue(c.connected_event.is_set()) - self.assertIsInstance(c.last_error, ConnectionException) + self.assertIsInstance(c.last_error, ProtocolError) def test_error_message_on_startup(self, *args): c = self.make_connection() @@ -217,13 +215,18 @@ def test_partial_send(self, *args): c = self.make_connection() # only write the first four bytes of the OptionsMessage + write_size = 4 c.socket.send.side_effect = None - c.socket.send.return_value = 4 + c.socket.send.return_value = write_size c.handle_write() + msg_size = 9 # v3+ frame header + expected_writes = 
int(math.ceil(float(msg_size) / write_size)) + size_mod = msg_size % write_size + last_write_size = size_mod if size_mod else write_size self.assertFalse(c.is_defunct) - self.assertEqual(2, c.socket.send.call_count) - self.assertEqual(4, len(c.socket.send.call_args[0][0])) + self.assertEqual(expected_writes, c.socket.send.call_count) + self.assertEqual(last_write_size, len(c.socket.send.call_args[0][0])) def test_socket_error_on_read(self, *args): c = self.make_connection() diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index ddd2dc0417..61e72421ce 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -17,18 +17,16 @@ import unittest # noqa import errno +import math +from mock import patch, Mock import os -import sys - import six from six import BytesIO - from socket import error as socket_error - -from mock import patch, Mock +import sys from cassandra.connection import (HEADER_DIRECTION_TO_CLIENT, - ConnectionException) + ConnectionException, ProtocolError) from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage, ReadyMessage, ServerError) @@ -128,7 +126,7 @@ def side_effect(*args): c._socket.recv.side_effect = side_effect c.handle_read(None, 0) - self.assertEqual(c._total_reqd_bytes, 20000 + len(header)) + self.assertEqual(c._current_frame.end_pos, 20000 + len(header)) # the EAGAIN prevents it from reading the last 100 bytes c._iobuf.seek(0, os.SEEK_END) pos = c._iobuf.tell() @@ -155,7 +153,7 @@ def test_protocol_error(self, *args): # make sure it errored correctly self.assertTrue(c.is_defunct) self.assertTrue(c.connected_event.is_set()) - self.assertIsInstance(c.last_error, ConnectionException) + self.assertIsInstance(c.last_error, ProtocolError) def test_error_message_on_startup(self, *args): c = self.make_connection() @@ -213,13 +211,18 @@ def test_partial_send(self, *args): c = self.make_connection() # only write the first four bytes of the 
OptionsMessage + write_size = 4 c._socket.send.side_effect = None - c._socket.send.return_value = 4 + c._socket.send.return_value = write_size c.handle_write(None, 0) + msg_size = 9 # v3+ frame header + expected_writes = int(math.ceil(float(msg_size) / write_size)) + size_mod = msg_size % write_size + last_write_size = size_mod if size_mod else write_size self.assertFalse(c.is_defunct) - self.assertEqual(2, c._socket.send.call_count) - self.assertEqual(4, len(c._socket.send.call_args[0][0])) + self.assertEqual(expected_writes, c._socket.send.call_count) + self.assertEqual(last_write_size, len(c._socket.send.call_args[0][0])) def test_socket_error_on_read(self, *args): c = self.make_connection() diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index 8563b948c0..d2142b09ca 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -25,6 +25,7 @@ except ImportError: twistedreactor = None # NOQA +from cassandra.connection import _Frame class TestTwistedProtocol(unittest.TestCase): @@ -154,16 +155,16 @@ def test_handle_read__incomplete(self): self.obj_ut.process_msg = Mock() self.assertEqual(self.obj_ut._iobuf.getvalue(), '') # buf starts empty # incomplete header - self.obj_ut._iobuf.write('\xff\x00\x00\x00') + self.obj_ut._iobuf.write('\x84\x00\x00\x00\x00') self.obj_ut.handle_read() - self.assertEqual(self.obj_ut._iobuf.getvalue(), '\xff\x00\x00\x00') + self.assertEqual(self.obj_ut._iobuf.getvalue(), '\x84\x00\x00\x00\x00') # full header, but incomplete body self.obj_ut._iobuf.write('\x00\x00\x00\x15') self.obj_ut.handle_read() self.assertEqual(self.obj_ut._iobuf.getvalue(), - '\xff\x00\x00\x00\x00\x00\x00\x15') - self.assertEqual(self.obj_ut._total_reqd_bytes, 29) + '\x84\x00\x00\x00\x00\x00\x00\x00\x15') + self.assertEqual(self.obj_ut._current_frame.end_pos, 30) # verify we never attempted to process the incomplete message self.assertFalse(self.obj_ut.process_msg.called) @@ -176,12 +177,15 
@@ def test_handle_read__fullmessage(self): self.assertEqual(self.obj_ut._iobuf.getvalue(), '') # buf starts empty # write a complete message, plus 'NEXT' (to simulate next message) + # assumes protocol v3+ as default Connection.protocol_version + body = 'this is the drum roll' + extra = 'NEXT' self.obj_ut._iobuf.write( - '\xff\x00\x00\x00\x00\x00\x00\x15this is the drum rollNEXT') + '\x84\x01\x00\x02\x03\x00\x00\x00\x15' + body + extra) self.obj_ut.handle_read() - self.assertEqual(self.obj_ut._iobuf.getvalue(), 'NEXT') + self.assertEqual(self.obj_ut._iobuf.getvalue(), extra) self.obj_ut.process_msg.assert_called_with( - '\xff\x00\x00\x00\x00\x00\x00\x15this is the drum roll', 21) + _Frame(version=4, flags=1, stream=2, opcode=3, body_offset=9, end_pos= 9 + len(body)), body) @patch('twisted.internet.reactor.connectTCP') def test_push(self, mock_connectTCP): diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 7fc4ed4e5a..e5be60759d 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -23,31 +23,38 @@ from threading import Lock from cassandra.cluster import Cluster -from cassandra.connection import (Connection, HEADER_DIRECTION_TO_CLIENT, - HEADER_DIRECTION_FROM_CLIENT, ProtocolError, - locally_supported_compressions, ConnectionHeartbeat) -from cassandra.marshal import uint8_pack, uint32_pack +from cassandra.connection import (Connection, HEADER_DIRECTION_TO_CLIENT, ProtocolError, + locally_supported_compressions, ConnectionHeartbeat, _Frame) +from cassandra.marshal import uint8_pack, uint32_pack, int32_pack from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage) class ConnectionTest(unittest.TestCase): - protocol_version = 2 - def make_connection(self): c = Connection('1.2.3.4') c._socket = Mock() c._socket.send.side_effect = lambda x: len(x) return c - def make_header_prefix(self, message_class, version=2, stream_id=0): - return six.binary_type().join(map(uint8_pack, [ 
- 0xff & (HEADER_DIRECTION_TO_CLIENT | version), - 0, # flags (compression) - stream_id, - message_class.opcode # opcode - ])) + def make_header_prefix(self, message_class, version=Connection.protocol_version, stream_id=0): + if Connection.protocol_version < 3: + return six.binary_type().join(map(uint8_pack, [ + 0xff & (HEADER_DIRECTION_TO_CLIENT | version), + 0, # flags (compression) + stream_id, + message_class.opcode # opcode + ])) + else: + return six.binary_type().join(map(uint8_pack, [ + 0xff & (HEADER_DIRECTION_TO_CLIENT | version), + 0, # flags (compression) + 0, # MSB for v3+ stream + stream_id, + message_class.opcode # opcode + ])) + def make_options_body(self): options_buf = BytesIO() @@ -72,31 +79,12 @@ def test_bad_protocol_version(self, *args): c.defunct = Mock() # read in a SupportedMessage response - header = self.make_header_prefix(SupportedMessage, version=0x04) + header = self.make_header_prefix(SupportedMessage, version=0x7f) options = self.make_options_body() message = self.make_msg(header, options) - c.process_msg(message, len(message) - 8) - - # make sure it errored correctly - c.defunct.assert_called_once_with(ANY) - args, kwargs = c.defunct.call_args - self.assertIsInstance(args[0], ProtocolError) - - def test_bad_header_direction(self, *args): - c = self.make_connection() - c._callbacks = Mock() - c.defunct = Mock() - - # read in a SupportedMessage response - header = six.binary_type().join(uint8_pack(i) for i in ( - 0xff & (HEADER_DIRECTION_FROM_CLIENT | self.protocol_version), - 0, # flags (compression) - 0, - SupportedMessage.opcode # opcode - )) - options = self.make_options_body() - message = self.make_msg(header, options) - c.process_msg(message, len(message) - 8) + c._iobuf = BytesIO() + c._iobuf.write(message) + c.process_io_buffer() # make sure it errored correctly c.defunct.assert_called_once_with(ANY) @@ -110,9 +98,10 @@ def test_negative_body_length(self, *args): # read in a SupportedMessage response header = 
self.make_header_prefix(SupportedMessage) - options = self.make_options_body() - message = self.make_msg(header, options) - c.process_msg(message, -13) + message = header + int32_pack(-13) + c._iobuf = BytesIO() + c._iobuf.write(message) + c.process_io_buffer() # make sure it errored correctly c.defunct.assert_called_once_with(ANY) @@ -135,8 +124,7 @@ def test_unsupported_cql_version(self, *args): }) options = options_buf.getvalue() - message = self.make_msg(header, options) - c.process_msg(message, len(message) - 8) + c.process_msg(_Frame(version=4, flags=0, stream=0, opcode=SupportedMessage.opcode, body_offset=9, end_pos=9 + len(options)), options) # make sure it errored correctly c.defunct.assert_called_once_with(ANY) @@ -155,8 +143,6 @@ def test_prefer_lz4_compression(self, *args): locally_supported_compressions['snappy'] = ('snappycompress', 'snappydecompress') # read in a SupportedMessage response - header = self.make_header_prefix(SupportedMessage) - options_buf = BytesIO() write_stringmultimap(options_buf, { 'CQL_VERSION': ['3.0.3'], @@ -164,8 +150,7 @@ def test_prefer_lz4_compression(self, *args): }) options = options_buf.getvalue() - message = self.make_msg(header, options) - c.process_msg(message, len(message) - 8) + c.process_msg(_Frame(version=4, flags=0, stream=0, opcode=SupportedMessage.opcode, body_offset=9, end_pos=9 + len(options)), options) self.assertEqual(c.decompressor, locally_supported_compressions['lz4'][1]) @@ -192,8 +177,7 @@ def test_requested_compression_not_available(self, *args): }) options = options_buf.getvalue() - message = self.make_msg(header, options) - c.process_msg(message, len(message) - 8) + c.process_msg(_Frame(version=4, flags=0, stream=0, opcode=SupportedMessage.opcode, body_offset=9, end_pos=9 + len(options)), options) # make sure it errored correctly c.defunct.assert_called_once_with(ANY) @@ -223,8 +207,7 @@ def test_use_requested_compression(self, *args): }) options = options_buf.getvalue() - message = 
self.make_msg(header, options) - c.process_msg(message, len(message) - 8) + c.process_msg(_Frame(version=4, flags=0, stream=0, opcode=SupportedMessage.opcode, body_offset=9, end_pos=9 + len(options)), options) self.assertEqual(c.decompressor, locally_supported_compressions['snappy'][1]) From 7abad89169c213e1edc1e9cc03625afa3aa041d6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 9 Jun 2015 12:21:07 -0500 Subject: [PATCH 1503/3726] Change return conversion for murmur3 ext PYTHON-331 Fixes an issue where the hash value was cast to a four-byte type on some platforms. --- cassandra/murmur3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/murmur3.c b/cassandra/murmur3.c index bdcb97289f..657c01f69d 100644 --- a/cassandra/murmur3.c +++ b/cassandra/murmur3.c @@ -201,7 +201,7 @@ murmur3(PyObject *self, PyObject *args) // TODO handle x86 version? result = MurmurHash3_x64_128((void *)key, len, seed); - return (PyObject *) PyLong_FromLong((long int)result); + return (PyObject *) PyLong_FromLongLong(result); } static PyMethodDef murmur3_methods[] = { From 57f3b0f6a70d7fe7cf0c8272301e8cee9d1dc244 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 10 Jun 2015 10:14:05 -0500 Subject: [PATCH 1504/3726] Don't default to TokenAware LBP is murmur3 ext. is not present. 
--- cassandra/cluster.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cb2502e7d2..834872398b 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -61,7 +61,7 @@ BatchMessage, RESULT_KIND_PREPARED, RESULT_KIND_SET_KEYSPACE, RESULT_KIND_ROWS, RESULT_KIND_SCHEMA_CHANGE) -from cassandra.metadata import Metadata, protect_name +from cassandra.metadata import Metadata, protect_name, murmur3 from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy, ExponentialReconnectionPolicy, HostDistance, RetryPolicy) @@ -173,7 +173,9 @@ def _shutdown_cluster(cluster): import platform if platform.python_implementation() == 'CPython': def default_lbp_factory(): - return TokenAwarePolicy(DCAwareRoundRobinPolicy()) + if murmur3 is not None: + return TokenAwarePolicy(DCAwareRoundRobinPolicy()) + return DCAwareRoundRobinPolicy() else: def default_lbp_factory(): return DCAwareRoundRobinPolicy() From 0ca97eaf4335f7a89b133ecdcfe9e7dab1419733 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 10 Jun 2015 10:14:36 -0500 Subject: [PATCH 1505/3726] cqle: deprecate cqle.columns.TimeUUID.from_datetime Prefer utility function in core instead. 
Resolves an issue with rounding in the cqle function, manifesting on Windows PYTHON-341 --- cassandra/cqlengine/columns.py | 28 ++++------------------------ 1 file changed, 4 insertions(+), 24 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index d418b11947..353064b6bc 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -558,30 +558,10 @@ def from_datetime(self, dt): :type dt: datetime :return: """ - global _last_timestamp - - epoch = datetime(1970, 1, 1, tzinfo=dt.tzinfo) - offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 - timestamp = (dt - epoch).total_seconds() - offset - - node = None - clock_seq = None - - nanoseconds = int(timestamp * 1e9) - timestamp = int(nanoseconds // 100) + 0x01b21dd213814000 - - if clock_seq is None: - import random - clock_seq = random.randrange(1 << 14) # instead of stable storage - time_low = timestamp & 0xffffffff - time_mid = (timestamp >> 32) & 0xffff - time_hi_version = (timestamp >> 48) & 0x0fff - clock_seq_low = clock_seq & 0xff - clock_seq_hi_variant = (clock_seq >> 8) & 0x3f - if node is None: - node = getnode() - return pyUUID(fields=(time_low, time_mid, time_hi_version, - clock_seq_hi_variant, clock_seq_low, node), version=1) + msg = "cqlengine.columns.TimeUUID.from_datetime is deprecated. Use cassandra.util.uuid_from_time instead." + warnings.warn(msg, DeprecationWarning) + log.warning(msg) + return util.uuid_from_time(dt) class Boolean(Column): From 04b69b96d348e9313d6c4727719ea2c5f98cdc8b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 10 Jun 2015 11:18:50 -0500 Subject: [PATCH 1506/3726] Don't wait for schema agreement during startup. Fixes an issue where connect will stall when connecting to mixed version clusters. Should not affect model consistency since we are already registered for schema_change events. 
--- cassandra/cluster.py | 2 +- tests/integration/standard/test_cluster.py | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cb2502e7d2..3f72e77455 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2118,7 +2118,7 @@ def _try_connect(self, host): peers_query, local_query, timeout=self._timeout) self._refresh_node_list_and_token_map(connection, preloaded_results=shared_results) - self._refresh_schema(connection, preloaded_results=shared_results) + self._refresh_schema(connection, preloaded_results=shared_results, schema_agreement_wait=-1) if not self._cluster.metadata.keyspaces: log.warning("[control connection] No schema built on connect; retrying without wait for schema agreement") self._refresh_schema(connection, preloaded_results=shared_results, schema_agreement_wait=0) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index bf6bb1d476..f45a32ed43 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -336,10 +336,7 @@ def test_refresh_schema_no_wait(self): # cluster agreement wait exceeded c = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=agreement_timeout) - start_time = time.time() - s = c.connect() - end_time = time.time() - self.assertGreaterEqual(end_time - start_time, agreement_timeout) + c.connect() self.assertTrue(c.metadata.keyspaces) # cluster agreement wait used for refresh From 434014e355ae3a4a8ba24838c131786ae7fb4645 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 10 Jun 2015 17:14:18 -0500 Subject: [PATCH 1507/3726] Refactor socket creation to Connection base PYTHON-322 Fixes an issue where AsyncoreConnection could not be used with SSL in Python 2.6 (ssl did not implement connect_ex). 
Refactored socket connect to common routine, and simplified AsyncoreConnection by using that and initializing the dispatcher with the connected socket. Also adds ssl support to EventletConnection. --- cassandra/connection.py | 38 +++++++++++++++++++ cassandra/io/asyncorereactor.py | 65 +++------------------------------ cassandra/io/eventletreactor.py | 32 +++------------- cassandra/io/geventreactor.py | 27 +++----------- cassandra/io/libevreactor.py | 32 +--------------- cassandra/io/twistedreactor.py | 1 - 6 files changed, 55 insertions(+), 140 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index c239313c45..2cf1c21949 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -19,10 +19,16 @@ import io import logging import os +import socket import sys from threading import Thread, Event, RLock import time +try: + import ssl +except ImportError: + ssl = None # NOQA + if 'gevent.monkey' in sys.modules: from gevent.queue import Queue, Empty else: @@ -177,6 +183,11 @@ class Connection(object): is_control_connection = False _iobuf = None + _socket = None + + _socket_impl = socket + _ssl_impl = ssl + def __init__(self, host='127.0.0.1', port=9042, authenticator=None, ssl_options=None, sockopts=None, compression=True, cql_version=None, protocol_version=2, is_control_connection=False, @@ -256,6 +267,33 @@ def factory(cls, host, timeout, *args, **kwargs): else: return conn + def _connect_socket(self): + sockerr = None + addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM) + for (af, socktype, proto, canonname, sockaddr) in addresses: + try: + self._socket = self._socket_impl.socket(af, socktype, proto) + if self.ssl_options: + if not self._ssl_impl: + raise Exception("This version of Python was not compiled with SSL support") + self._socket = self._ssl_impl.wrap_socket(self._socket, **self.ssl_options) + self._socket.settimeout(1.0) + self._socket.connect(sockaddr) + sockerr = None + break + 
except socket.error as err: + if self._socket: + self._socket.close() + self._socket = None + sockerr = err + + if sockerr: + raise socket.error(sockerr.errno, "Tried connecting to %s. Last error: %s" % ([a[4] for a in addresses], sockerr.strerror)) + + if self.sockopts: + for args in self.sockopts: + self._socket.setsockopt(*args) + def close(self): raise NotImplementedError() diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index ef687c388c..235c7660b5 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -23,7 +23,6 @@ from six.moves import range -from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL, EISCONN, errorcode try: from weakref import WeakSet except ImportError: @@ -36,9 +35,7 @@ except ImportError: ssl = None # NOQA -from cassandra import OperationTimedOut -from cassandra.connection import (Connection, ConnectionShutdown, - ConnectionException, NONBLOCKING) +from cassandra.connection import (Connection, ConnectionShutdown, NONBLOCKING) from cassandra.protocol import RegisterMessage log = logging.getLogger(__name__) @@ -168,66 +165,17 @@ def __init__(self, *args, **kwargs): self._loop.connection_created(self) - sockerr = None - addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM) - for (af, socktype, proto, canonname, sockaddr) in addresses: - try: - self.create_socket(af, socktype) - self.connect(sockaddr) - sockerr = None - break - except socket.error as err: - sockerr = err - if sockerr: - raise socket.error(sockerr.errno, "Tried connecting to %s. 
Last error: %s" % ([a[4] for a in addresses], sockerr.strerror)) - - self.add_channel() - - if self.sockopts: - for args in self.sockopts: - self.socket.setsockopt(*args) + self._connect_socket() + asyncore.dispatcher.__init__(self, self._socket) self._writable = True self._readable = True + self._send_options_message() + # start the event loop if needed self._loop.maybe_start() - def set_socket(self, sock): - # Overrides the same method in asyncore. We deliberately - # do not call add_channel() in this method so that we can call - # it later, after connect() has completed. - self.socket = sock - self._fileno = sock.fileno() - - def create_socket(self, family, type): - # copied from asyncore, but with the line to set the socket in - # non-blocking mode removed (we will do that after connecting) - self.family_and_type = family, type - sock = socket.socket(family, type) - if self.ssl_options: - if not ssl: - raise Exception("This version of Python was not compiled with SSL support") - sock = ssl.wrap_socket(sock, **self.ssl_options) - self.set_socket(sock) - - def connect(self, address): - # this is copied directly from asyncore.py, except that - # a timeout is set before connecting - self.connected = False - self.connecting = True - self.socket.settimeout(1.0) - err = self.socket.connect_ex(address) - if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \ - or err == EINVAL and os.name in ('nt', 'ce'): - raise ConnectionException("Timed out connecting to %s" % (address[0])) - if err in (0, EISCONN): - self.addr = address - self.socket.setblocking(0) - self.handle_connect_event() - else: - raise socket.error(err, os.strerror(err)) - def close(self): with self.lock: if self.is_closed: @@ -248,9 +196,6 @@ def close(self): # don't leave in-progress operations hanging self.connected_event.set() - def handle_connect(self): - self._send_options_message() - def handle_error(self): self.defunct(sys.exc_info()[1]) diff --git a/cassandra/io/eventletreactor.py 
b/cassandra/io/eventletreactor.py index 670d0f1865..484ebbe3fb 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -24,11 +24,9 @@ from functools import partial import logging import os -from threading import Event - from six.moves import xrange +from threading import Event -from cassandra import OperationTimedOut from cassandra.connection import Connection, ConnectionShutdown from cassandra.protocol import RegisterMessage @@ -51,7 +49,9 @@ class EventletConnection(Connection): _total_reqd_bytes = 0 _read_watcher = None _write_watcher = None - _socket = None + + _socket_impl = eventlet.green.socket + _ssl_impl = eventlet.green.ssl @classmethod def initialize_reactor(cls): @@ -66,29 +66,7 @@ def __init__(self, *args, **kwargs): self._callbacks = {} self._push_watchers = defaultdict(set) - sockerr = None - addresses = socket.getaddrinfo( - self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM - ) - for (af, socktype, proto, canonname, sockaddr) in addresses: - try: - self._socket = socket.socket(af, socktype, proto) - self._socket.settimeout(1.0) - self._socket.connect(sockaddr) - sockerr = None - break - except socket.error as err: - sockerr = err - if sockerr: - raise socket.error( - sockerr.errno, - "Tried connecting to %s. Last error: %s" % ( - [a[4] for a in addresses], sockerr.strerror) - ) - - if self.sockopts: - for args in self.sockopts: - self._socket.setsockopt(*args) + self._connect_socket() self._read_watcher = eventlet.spawn(lambda: self.handle_read()) self._write_watcher = eventlet.spawn(lambda: self.handle_write()) diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 6e9af0da4d..e9f1dd7ceb 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import gevent -from gevent import select, socket, ssl +from gevent import select, socket from gevent.event import Event from gevent.queue import Queue @@ -25,7 +25,6 @@ from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL -from cassandra import OperationTimedOut from cassandra.connection import Connection, ConnectionShutdown from cassandra.protocol import RegisterMessage @@ -48,7 +47,9 @@ class GeventConnection(Connection): _total_reqd_bytes = 0 _read_watcher = None _write_watcher = None - _socket = None + + _socket_impl = gevent.socket + _ssl_impl = gevent.ssl def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) @@ -59,25 +60,7 @@ def __init__(self, *args, **kwargs): self._callbacks = {} self._push_watchers = defaultdict(set) - sockerr = None - addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM) - for (af, socktype, proto, canonname, sockaddr) in addresses: - try: - self._socket = socket.socket(af, socktype, proto) - if self.ssl_options: - self._socket = ssl.wrap_socket(self._socket, **self.ssl_options) - self._socket.settimeout(1.0) - self._socket.connect(sockaddr) - sockerr = None - break - except socket.error as err: - sockerr = err - if sockerr: - raise socket.error(sockerr.errno, "Tried connecting to %s. 
Last error: %s" % ([a[4] for a in addresses], sockerr.strerror)) - - if self.sockopts: - for args in self.sockopts: - self._socket.setsockopt(*args) + self._connect_socket() self._read_watcher = gevent.spawn(self.handle_read) self._write_watcher = gevent.spawn(self.handle_write) diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 93b4c97854..eaa6f4ae38 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -22,8 +22,7 @@ from six.moves import xrange -from cassandra import OperationTimedOut -from cassandra.connection import Connection, ConnectionShutdown, NONBLOCKING +from cassandra.connection import Connection, ConnectionShutdown, NONBLOCKING, ssl from cassandra.protocol import RegisterMessage try: import cassandra.io.libevwrapper as libev @@ -37,11 +36,6 @@ "the C extension.") -try: - import ssl -except ImportError: - ssl = None # NOQA - log = logging.getLogger(__name__) @@ -244,31 +238,9 @@ def __init__(self, *args, **kwargs): self._callbacks = {} self.deque = deque() self._deque_lock = Lock() - - sockerr = None - addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM) - for (af, socktype, proto, canonname, sockaddr) in addresses: - try: - self._socket = socket.socket(af, socktype, proto) - if self.ssl_options: - if not ssl: - raise Exception("This version of Python was not compiled with SSL support") - self._socket = ssl.wrap_socket(self._socket, **self.ssl_options) - self._socket.settimeout(1.0) # TODO potentially make this value configurable - self._socket.connect(sockaddr) - sockerr = None - break - except socket.error as err: - sockerr = err - if sockerr: - raise socket.error(sockerr.errno, "Tried connecting to %s. 
Last error: %s" % ([a[4] for a in addresses], sockerr.strerror)) - + self._connect_socket() self._socket.setblocking(0) - if self.sockopts: - for args in self.sockopts: - self._socket.setsockopt(*args) - with self._libevloop._lock: self._read_watcher = libev.IO(self._socket.fileno(), libev.EV_READ, self._libevloop._loop, self.handle_read) self._write_watcher = libev.IO(self._socket.fileno(), libev.EV_WRITE, self._libevloop._loop, self.handle_write) diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index ff81e5613f..27d4f7a471 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -22,7 +22,6 @@ import weakref import atexit -from cassandra import OperationTimedOut from cassandra.connection import Connection, ConnectionShutdown from cassandra.protocol import RegisterMessage From 2b3a72efcc47479a7bd3adef3a522d6550047ae1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 11 Jun 2015 11:47:08 -0500 Subject: [PATCH 1508/3726] cqle: use integer division for converting timedelta to seconds Emulates the timedelta.total_seconds() function in python 2.7+ --- cassandra/cqlengine/functions.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/functions.py b/cassandra/cqlengine/functions.py index 43c98afea5..d69e16f1fe 100644 --- a/cassandra/cqlengine/functions.py +++ b/cassandra/cqlengine/functions.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import division from datetime import datetime from cassandra.cqlengine import UnicodeMixin, ValidationError @@ -23,7 +24,9 @@ def get_total_seconds(td): return td.total_seconds() else: def get_total_seconds(td): - return 86400*td.days + td.seconds + td.microseconds/1e6 + # integer division used here to emulate built-in total_seconds + return ((86400 * td.days + td.seconds) * 10 ** 6 + td.microseconds) / 10 ** 6 + class QueryValue(UnicodeMixin): """ From b47cbd627924fc574660fa81c67b175f733b98d1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 11 Jun 2015 11:49:34 -0500 Subject: [PATCH 1509/3726] cqle: Update docs reflecting new 2.6 compatibility --- README.rst | 2 -- docs/index.rst | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/README.rst b/README.rst index 110505da97..bbb63a323b 100644 --- a/README.rst +++ b/README.rst @@ -8,8 +8,6 @@ A modern, `feature-rich `_ a The driver supports Python 2.6, 2.7, 3.3, and 3.4*. -\* cqlengine component presently supports Python 2.6+ - Feedback Requested ------------------ **Help us focus our efforts!** Provide your input on the `Platform and Runtime Survey `_ (we kept it short). diff --git a/docs/index.rst b/docs/index.rst index 0b89c369da..7775dc62b4 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,7 +4,7 @@ A Python client driver for `Apache Cassandra `_. This driver works exclusively with the Cassandra Query Language v3 (CQL3) and Cassandra's native protocol. Cassandra 1.2+ is supported. -The core driver supports Python 2.6, 2.7, 3.3, and 3.4. The object mapper is presently supported in 2.7+. +The driver supports Python 2.6, 2.7, 3.3, and 3.4. This driver is open source under the `Apache v2 License `_. 
From 2f6d11208e76bcf8975f4b08129254b2bc165e24 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 11 Jun 2015 16:33:20 -0500 Subject: [PATCH 1510/3726] cqle: Make integration tests run in Python 2.6 PYTHON-288 --- cassandra/cqlengine/columns.py | 4 +- cassandra/cqlengine/functions.py | 39 ++---- tests/integration/cqlengine/base.py | 12 +- .../columns/test_container_columns.py | 113 ++++++++---------- .../cqlengine/columns/test_counter_column.py | 17 ++- .../cqlengine/columns/test_static_column.py | 14 ++- .../cqlengine/columns/test_validation.py | 26 ++-- .../cqlengine/columns/test_value_io.py | 70 ++++++----- .../cqlengine/connections/__init__.py | 14 --- .../management/test_compaction_settings.py | 7 +- .../cqlengine/management/test_management.py | 8 +- .../model/test_equality_operations.py | 1 - .../integration/cqlengine/model/test_model.py | 10 +- .../cqlengine/model/test_model_io.py | 5 +- .../integration/cqlengine/model/test_udts.py | 5 +- .../cqlengine/operators/test_base_operator.py | 8 +- .../cqlengine/query/test_batch_query.py | 2 +- .../cqlengine/query/test_datetime_queries.py | 3 +- .../cqlengine/query/test_queryoperators.py | 4 +- .../cqlengine/query/test_queryset.py | 10 +- .../cqlengine/query/test_updates.py | 22 ++-- .../statements/test_assignment_clauses.py | 51 ++++---- .../statements/test_assignment_statement.py | 7 +- .../statements/test_base_statement.py | 7 +- .../statements/test_insert_statement.py | 7 +- .../statements/test_select_statement.py | 7 +- .../statements/test_update_statement.py | 10 +- .../cqlengine/statements/test_where_clause.py | 7 +- .../integration/cqlengine/test_batch_query.py | 2 +- .../integration/cqlengine/test_ifnotexists.py | 9 +- tests/integration/cqlengine/test_load.py | 8 +- tests/integration/cqlengine/test_timestamp.py | 15 ++- .../integration/cqlengine/test_transaction.py | 7 +- tox.ini | 5 - 34 files changed, 280 insertions(+), 256 deletions(-) delete mode 100644 
tests/integration/cqlengine/connections/__init__.py diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 13dea5b430..8f8a6e2546 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -639,7 +639,7 @@ def validate(self, value): if val is None: return try: - return _Decimal(val) + return _Decimal(repr(val)) if isinstance(val, float) else _Decimal(val) except InvalidOperation: raise ValidationError("{0} '{1}' can't be coerced to decimal".format(self.column_name, val)) @@ -808,7 +808,7 @@ def validate(self, value): if not isinstance(val, dict): raise ValidationError('{0} {1} is not a dict object'.format(self.column_name, val)) if None in val: - raise ValidationError("{} None is not allowed in a map".format(self.column_name)) + raise ValidationError("{0} None is not allowed in a map".format(self.column_name)) return dict((self.key_col.validate(k), self.value_col.validate(v)) for k, v in val.items()) def to_python(self, value): diff --git a/cassandra/cqlengine/functions.py b/cassandra/cqlengine/functions.py index d69e16f1fe..bc210d69ce 100644 --- a/cassandra/cqlengine/functions.py +++ b/cassandra/cqlengine/functions.py @@ -62,23 +62,16 @@ class BaseQueryFunction(QueryValue): pass -class MinTimeUUID(BaseQueryFunction): - """ - return a fake timeuuid corresponding to the smallest possible timeuuid for the given timestamp - - http://cassandra.apache.org/doc/cql3/CQL.html#timeuuidFun - """ - - format_string = 'MinTimeUUID(%({0})s)' +class TimeUUIDQueryFunction(BaseQueryFunction): def __init__(self, value): """ - :param value: the time to create a maximum time uuid from + :param value: the time to create bounding time uuid from :type value: datetime """ if not isinstance(value, datetime): raise ValidationError('datetime instance is required') - super(MinTimeUUID, self).__init__(value) + super(TimeUUIDQueryFunction, self).__init__(value) def to_database(self, val): epoch = datetime(1970, 1, 1, tzinfo=val.tzinfo) @@ 
-89,31 +82,23 @@ def update_context(self, ctx): ctx[str(self.context_id)] = self.to_database(self.value) -class MaxTimeUUID(BaseQueryFunction): +class MinTimeUUID(TimeUUIDQueryFunction): """ - return a fake timeuuid corresponding to the largest possible timeuuid for the given timestamp + return a fake timeuuid corresponding to the smallest possible timeuuid for the given timestamp http://cassandra.apache.org/doc/cql3/CQL.html#timeuuidFun """ + format_string = 'MinTimeUUID(%({0})s)' - format_string = 'MaxTimeUUID(%({0})s)' - def __init__(self, value): - """ - :param value: the time to create a minimum time uuid from - :type value: datetime - """ - if not isinstance(value, datetime): - raise ValidationError('datetime instance is required') - super(MaxTimeUUID, self).__init__(value) +class MaxTimeUUID(TimeUUIDQueryFunction): + """ + return a fake timeuuid corresponding to the largest possible timeuuid for the given timestamp - def to_database(self, val): - epoch = datetime(1970, 1, 1, tzinfo=val.tzinfo) - offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0 - return int(((val - epoch).total_seconds() - offset) * 1000) + http://cassandra.apache.org/doc/cql3/CQL.html#timeuuidFun + """ + format_string = 'MaxTimeUUID(%({0})s)' - def update_context(self, ctx): - ctx[str(self.context_id)] = self.to_database(self.value) class Token(BaseQueryFunction): diff --git a/tests/integration/cqlengine/base.py b/tests/integration/cqlengine/base.py index f0b75f5bd1..856f2bac4f 100644 --- a/tests/integration/cqlengine/base.py +++ b/tests/integration/cqlengine/base.py @@ -11,28 +11,30 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+try: + import unittest2 as unittest +except ImportError: + import unittest # noqa import sys -from unittest import TestCase from cassandra.cqlengine.connection import get_session -class BaseCassEngTestCase(TestCase): +class BaseCassEngTestCase(unittest.TestCase): session = None def setUp(self): self.session = get_session() - super(BaseCassEngTestCase, self).setUp() def assertHasAttr(self, obj, attr): self.assertTrue(hasattr(obj, attr), - "{} doesn't have attribute: {}".format(obj, attr)) + "{0} doesn't have attribute: {1}".format(obj, attr)) def assertNotHasAttr(self, obj, attr): self.assertFalse(hasattr(obj, attr), - "{} shouldn't have the attribute: {}".format(obj, attr)) + "{0} shouldn't have the attribute: {1}".format(obj, attr)) if sys.version_info > (3, 0): def assertItemsEqual(self, first, second, msg=None): diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py index 6ec301eb67..213c625c66 100644 --- a/tests/integration/cqlengine/columns/test_container_columns.py +++ b/tests/integration/cqlengine/columns/test_container_columns.py @@ -13,13 +13,18 @@ # limitations under the License. 
from datetime import datetime, timedelta -import json, six, sys, traceback, logging +import json +import logging +import six +import sys +import traceback from uuid import uuid4 from cassandra import WriteTimeout -from cassandra.cqlengine.models import Model, ValidationError import cassandra.cqlengine.columns as columns +from cassandra.cqlengine.functions import get_total_seconds +from cassandra.cqlengine.models import Model, ValidationError from cassandra.cqlengine.management import sync_table, drop_table from tests.integration.cqlengine import is_prepend_reversed from tests.integration.cqlengine.base import BaseCassEngTestCase @@ -38,14 +43,16 @@ class JsonTestColumn(columns.Column): db_type = 'text' def to_python(self, value): - if value is None: return + if value is None: + return if isinstance(value, six.string_types): return json.loads(value) else: return value def to_database(self, value): - if value is None: return + if value is None: + return return json.dumps(value) @@ -53,18 +60,15 @@ class TestSetColumn(BaseCassEngTestCase): @classmethod def setUpClass(cls): - super(TestSetColumn, cls).setUpClass() drop_table(TestSetModel) sync_table(TestSetModel) @classmethod def tearDownClass(cls): - super(TestSetColumn, cls).tearDownClass() drop_table(TestSetModel) def test_add_none_fails(self): - with self.assertRaises(ValidationError): - m = TestSetModel.create(int_set=set([None])) + self.assertRaises(ValidationError, TestSetModel.create, **{'int_set': set([None])}) def test_empty_set_initial(self): """ @@ -83,7 +87,7 @@ def test_deleting_last_item_should_succeed(self): m.save() m = TestSetModel.get(partition=m.partition) - self.assertNotIn(5, m.int_set) + self.assertTrue(5 not in m.int_set) def test_blind_deleting_last_item_should_succeed(self): m = TestSetModel.create() @@ -93,7 +97,7 @@ def test_blind_deleting_last_item_should_succeed(self): TestSetModel.objects(partition=m.partition).update(int_set=set()) m = TestSetModel.get(partition=m.partition) - 
self.assertNotIn(5, m.int_set) + self.assertTrue(5 not in m.int_set) def test_empty_set_retrieval(self): m = TestSetModel.create() @@ -102,7 +106,7 @@ def test_empty_set_retrieval(self): def test_io_success(self): """ Tests that a basic usage works as expected """ - m1 = TestSetModel.create(int_set={1, 2}, text_set={'kai', 'andreas'}) + m1 = TestSetModel.create(int_set=set((1, 2)), text_set=set(('kai', 'andreas'))) m2 = TestSetModel.get(partition=m1.partition) assert isinstance(m2.int_set, set) @@ -118,8 +122,7 @@ def test_type_validation(self): """ Tests that attempting to use the wrong types will raise an exception """ - with self.assertRaises(ValidationError): - TestSetModel.create(int_set={'string', True}, text_set={1, 3.0}) + self.assertRaises(ValidationError, TestSetModel.create, **{'int_set': set(('string', True)), 'text_set': set((1, 3.0))}) def test_element_count_validation(self): """ @@ -127,27 +130,26 @@ def test_element_count_validation(self): """ while True: try: - TestSetModel.create(text_set={str(uuid4()) for i in range(65535)}) + TestSetModel.create(text_set=set(str(uuid4()) for i in range(65535))) break except WriteTimeout: ex_type, ex, tb = sys.exc_info() log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb - with self.assertRaises(ValidationError): - TestSetModel.create(text_set={str(uuid4()) for i in range(65536)}) + self.assertRaises(ValidationError, TestSetModel.create, **{'text_set': set(str(uuid4()) for i in range(65536))}) def test_partial_updates(self): """ Tests that partial udpates work as expected """ - m1 = TestSetModel.create(int_set={1, 2, 3, 4}) + m1 = TestSetModel.create(int_set=set((1, 2, 3, 4))) m1.int_set.add(5) m1.int_set.remove(1) - assert m1.int_set == {2, 3, 4, 5} + assert m1.int_set == set((2, 3, 4, 5)) m1.save() m2 = TestSetModel.get(partition=m1.partition) - assert m2.int_set == {2, 3, 4, 5} + assert m2.int_set == set((2, 3, 4, 5)) def 
test_instantiation_with_column_class(self): """ @@ -167,9 +169,9 @@ def test_instantiation_with_column_instance(self): def test_to_python(self): """ Tests that to_python of value column is called """ column = columns.Set(JsonTestColumn) - val = {1, 2, 3} + val = set((1, 2, 3)) db_val = column.to_database(val) - assert db_val == {json.dumps(v) for v in val} + assert db_val == set(json.dumps(v) for v in val) py_val = column.to_python(db_val) assert py_val == val @@ -177,17 +179,16 @@ def test_default_empty_container_saving(self): """ tests that the default empty container is not saved if it hasn't been updated """ pkey = uuid4() # create a row with set data - TestSetModel.create(partition=pkey, int_set={3, 4}) + TestSetModel.create(partition=pkey, int_set=set((3, 4))) # create another with no set data TestSetModel.create(partition=pkey) m = TestSetModel.get(partition=pkey) - self.assertEqual(m.int_set, {3, 4}) + self.assertEqual(m.int_set, set((3, 4))) class TestListModel(Model): - partition = columns.UUID(primary_key=True, default=uuid4) int_list = columns.List(columns.Integer, required=False) text_list = columns.List(columns.Text, required=False) @@ -197,20 +198,18 @@ class TestListColumn(BaseCassEngTestCase): @classmethod def setUpClass(cls): - super(TestListColumn, cls).setUpClass() drop_table(TestListModel) sync_table(TestListModel) @classmethod def tearDownClass(cls): - super(TestListColumn, cls).tearDownClass() drop_table(TestListModel) def test_initial(self): tmp = TestListModel.create() tmp.int_list.append(1) - def test_initial(self): + def test_initial_retrieve(self): tmp = TestListModel.create() tmp2 = TestListModel.get(partition=tmp.partition) tmp2.int_list.append(1) @@ -236,8 +235,7 @@ def test_type_validation(self): """ Tests that attempting to use the wrong types will raise an exception """ - with self.assertRaises(ValidationError): - TestListModel.create(int_list=['string', True], text_list=[1, 3.0]) + self.assertRaises(ValidationError, 
TestListModel.create, **{'int_list': ['string', True], 'text_list': [1, 3.0]}) def test_element_count_validation(self): """ @@ -251,8 +249,7 @@ def test_element_count_validation(self): ex_type, ex, tb = sys.exc_info() log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb - with self.assertRaises(ValidationError): - TestListModel.create(text_list=[str(uuid4()) for i in range(65536)]) + self.assertRaises(ValidationError, TestListModel.create, **{'text_list': [str(uuid4()) for _ in range(65536)]}) def test_partial_updates(self): """ Tests that partial udpates work as expected """ @@ -300,16 +297,16 @@ def test_default_empty_container_saving(self): """ tests that the default empty container is not saved if it hasn't been updated """ pkey = uuid4() # create a row with list data - TestListModel.create(partition=pkey, int_list=[1,2,3,4]) + TestListModel.create(partition=pkey, int_list=[1, 2, 3, 4]) # create another with no list data TestListModel.create(partition=pkey) m = TestListModel.get(partition=pkey) - self.assertEqual(m.int_list, [1,2,3,4]) + self.assertEqual(m.int_list, [1, 2, 3, 4]) def test_remove_entry_works(self): pkey = uuid4() - tmp = TestListModel.create(partition=pkey, int_list=[1,2]) + tmp = TestListModel.create(partition=pkey, int_list=[1, 2]) tmp.int_list.pop() tmp.update() tmp = TestListModel.get(partition=pkey) @@ -317,7 +314,7 @@ def test_remove_entry_works(self): def test_update_from_non_empty_to_empty(self): pkey = uuid4() - tmp = TestListModel.create(partition=pkey, int_list=[1,2]) + tmp = TestListModel.create(partition=pkey, int_list=[1, 2]) tmp.int_list = [] tmp.update() @@ -326,8 +323,7 @@ def test_update_from_non_empty_to_empty(self): def test_insert_none(self): pkey = uuid4() - with self.assertRaises(ValidationError): - TestListModel.create(partition=pkey, int_list=[None]) + self.assertRaises(ValidationError, TestListModel.create, **{'partition': pkey, 'int_list': [None]}) def 
test_blind_list_updates_from_none(self): """ Tests that updates from None work as expected """ @@ -344,6 +340,7 @@ def test_blind_list_updates_from_none(self): m3 = TestListModel.get(partition=m.partition) assert m3.int_list == [] + class TestMapModel(Model): partition = columns.UUID(primary_key=True, default=uuid4) int_map = columns.Map(columns.Integer, columns.UUID, required=False) @@ -354,13 +351,11 @@ class TestMapColumn(BaseCassEngTestCase): @classmethod def setUpClass(cls): - super(TestMapColumn, cls).setUpClass() drop_table(TestMapModel) sync_table(TestMapModel) @classmethod def tearDownClass(cls): - super(TestMapColumn, cls).tearDownClass() drop_table(TestMapModel) def test_empty_default(self): @@ -368,8 +363,7 @@ def test_empty_default(self): tmp.int_map['blah'] = 1 def test_add_none_as_map_key(self): - with self.assertRaises(ValidationError): - TestMapModel.create(int_map={None: uuid4()}) + self.assertRaises(ValidationError, TestMapModel.create, **{'int_map': {None: uuid4()}}) def test_empty_retrieve(self): tmp = TestMapModel.create() @@ -384,7 +378,7 @@ def test_remove_last_entry_works(self): tmp.save() tmp = TestMapModel.get(partition=tmp.partition) - self.assertNotIn("blah", tmp.int_map) + self.assertTrue("blah" not in tmp.int_map) def test_io_success(self): """ Tests that a basic usage works as expected """ @@ -395,25 +389,24 @@ def test_io_success(self): m1 = TestMapModel.create(int_map={1: k1, 2: k2}, text_map={'now': now, 'then': then}) m2 = TestMapModel.get(partition=m1.partition) - assert isinstance(m2.int_map, dict) - assert isinstance(m2.text_map, dict) + self.assertTrue(isinstance(m2.int_map, dict)) + self.assertTrue(isinstance(m2.text_map, dict)) - assert 1 in m2.int_map - assert 2 in m2.int_map - assert m2.int_map[1] == k1 - assert m2.int_map[2] == k2 + self.assertTrue(1 in m2.int_map) + self.assertTrue(2 in m2.int_map) + self.assertEqual(m2.int_map[1], k1) + self.assertEqual(m2.int_map[2], k2) - assert 'now' in m2.text_map - assert 'then' 
in m2.text_map - assert (now - m2.text_map['now']).total_seconds() < 0.001 - assert (then - m2.text_map['then']).total_seconds() < 0.001 + self.assertTrue('now' in m2.text_map) + self.assertTrue('then' in m2.text_map) + self.assertAlmostEqual(get_total_seconds(now - m2.text_map['now']), 0, 2) + self.assertAlmostEqual(get_total_seconds(then - m2.text_map['then']), 0, 2) def test_type_validation(self): """ Tests that attempting to use the wrong types will raise an exception """ - with self.assertRaises(ValidationError): - TestMapModel.create(int_map={'key': 2, uuid4(): 'val'}, text_map={2: 5}) + self.assertRaises(ValidationError, TestMapModel.create, **{'int_map': {'key': 2, uuid4(): 'val'}, 'text_map': {2: 5}}) def test_element_count_validation(self): """ @@ -421,19 +414,18 @@ def test_element_count_validation(self): """ while True: try: - TestMapModel.create(text_map={str(uuid4()): i for i in range(65535)}) + TestMapModel.create(text_map=dict((str(uuid4()), i) for i in range(65535))) break except WriteTimeout: ex_type, ex, tb = sys.exc_info() log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb - with self.assertRaises(ValidationError): - TestMapModel.create(text_map={str(uuid4()): i for i in range(65536)}) + self.assertRaises(ValidationError, TestMapModel.create, **{'text_map': dict((str(uuid4()), i) for i in range(65536))}) def test_partial_updates(self): """ Tests that partial udpates work as expected """ now = datetime.now() - #derez it a bit + # derez it a bit now = datetime(*now.timetuple()[:-3]) early = now - timedelta(minutes=30) earlier = early - timedelta(minutes=30) @@ -511,7 +503,7 @@ def test_to_python(self): column = columns.Map(JsonTestColumn, JsonTestColumn) val = {1: 2, 3: 4, 5: 6} db_val = column.to_database(val) - assert db_val == {json.dumps(k):json.dumps(v) for k,v in val.items()} + assert db_val == dict((json.dumps(k), json.dumps(v)) for k, v in val.items()) py_val = column.to_python(db_val) assert 
py_val == val @@ -530,7 +522,6 @@ def test_default_empty_container_saving(self): class TestCamelMapModel(Model): - partition = columns.UUID(primary_key=True, default=uuid4) camelMap = columns.Map(columns.Text, columns.Integer, required=False) @@ -539,13 +530,11 @@ class TestCamelMapColumn(BaseCassEngTestCase): @classmethod def setUpClass(cls): - super(TestCamelMapColumn, cls).setUpClass() drop_table(TestCamelMapModel) sync_table(TestCamelMapModel) @classmethod def tearDownClass(cls): - super(TestCamelMapColumn, cls).tearDownClass() drop_table(TestCamelMapModel) def test_camelcase_column(self): diff --git a/tests/integration/cqlengine/columns/test_counter_column.py b/tests/integration/cqlengine/columns/test_counter_column.py index caded408cb..c9579ba7b9 100644 --- a/tests/integration/cqlengine/columns/test_counter_column.py +++ b/tests/integration/cqlengine/columns/test_counter_column.py @@ -31,41 +31,48 @@ class TestClassConstruction(BaseCassEngTestCase): def test_defining_a_non_counter_column_fails(self): """ Tests that defining a non counter column field in a model with a counter column fails """ - with self.assertRaises(ModelDefinitionException): + try: class model(Model): partition = columns.UUID(primary_key=True, default=uuid4) counter = columns.Counter() text = columns.Text() + self.fail("did not raise expected ModelDefinitionException") + except ModelDefinitionException: + pass def test_defining_a_primary_key_counter_column_fails(self): """ Tests that defining primary keys on counter columns fails """ - with self.assertRaises(TypeError): + try: class model(Model): partition = columns.UUID(primary_key=True, default=uuid4) cluster = columns.Counter(primary_ley=True) counter = columns.Counter() + self.fail("did not raise expected TypeError") + except TypeError: + pass # force it - with self.assertRaises(ModelDefinitionException): + try: class model(Model): partition = columns.UUID(primary_key=True, default=uuid4) cluster = columns.Counter() cluster.primary_key 
= True counter = columns.Counter() + self.fail("did not raise expected ModelDefinitionException") + except ModelDefinitionException: + pass class TestCounterColumn(BaseCassEngTestCase): @classmethod def setUpClass(cls): - super(TestCounterColumn, cls).setUpClass() drop_table(TestCounterModel) sync_table(TestCounterModel) @classmethod def tearDownClass(cls): - super(TestCounterColumn, cls).tearDownClass() drop_table(TestCounterModel) def test_updates(self): diff --git a/tests/integration/cqlengine/columns/test_static_column.py b/tests/integration/cqlengine/columns/test_static_column.py index 7037e92a50..a4864050d9 100644 --- a/tests/integration/cqlengine/columns/test_static_column.py +++ b/tests/integration/cqlengine/columns/test_static_column.py @@ -12,7 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from unittest import skipUnless +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + from uuid import uuid4 from cassandra.cqlengine import columns @@ -33,19 +37,21 @@ class TestStaticModel(Model): text = columns.Text() -@skipUnless(STATIC_SUPPORTED, "only runs against the cql3 protocol v2.0") class TestStaticColumn(BaseCassEngTestCase): + def setUp(cls): + if not STATIC_SUPPORTED: + raise unittest.SkipTest("only runs against the cql3 protocol v2.0") + super(TestStaticColumn, cls).setUp() + @classmethod def setUpClass(cls): - super(TestStaticColumn, cls).setUpClass() drop_table(TestStaticModel) if STATIC_SUPPORTED: # setup and teardown run regardless of skip sync_table(TestStaticModel) @classmethod def tearDownClass(cls): - super(TestStaticColumn, cls).tearDownClass() drop_table(TestStaticModel) def test_mixed_updates(self): diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index a74f83a0de..5426548e5e 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ 
b/tests/integration/cqlengine/columns/test_validation.py @@ -12,9 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + from datetime import datetime, timedelta, date, tzinfo from decimal import Decimal as D -from unittest import TestCase, SkipTest from uuid import uuid4, uuid1 from cassandra import InvalidRequest @@ -46,12 +50,10 @@ class DatetimeTest(Model): @classmethod def setUpClass(cls): - super(TestDatetime, cls).setUpClass() sync_table(cls.DatetimeTest) @classmethod def tearDownClass(cls): - super(TestDatetime, cls).tearDownClass() drop_table(cls.DatetimeTest) def test_datetime_io(self): @@ -95,7 +97,6 @@ class BoolDefaultValueTest(Model): @classmethod def setUpClass(cls): - super(TestBoolDefault, cls).setUpClass() sync_table(cls.BoolDefaultValueTest) def test_default_is_set(self): @@ -112,7 +113,6 @@ class BoolValidationTest(Model): @classmethod def setUpClass(cls): - super(TestBoolValidation, cls).setUpClass() sync_table(cls.BoolValidationTest) def test_validation_preserves_none(self): @@ -129,12 +129,10 @@ class VarIntTest(Model): @classmethod def setUpClass(cls): - super(TestVarInt, cls).setUpClass() sync_table(cls.VarIntTest) @classmethod def tearDownClass(cls): - super(TestVarInt, cls).tearDownClass() sync_table(cls.VarIntTest) def test_varint_io(self): @@ -155,14 +153,12 @@ class DateTest(Model): @classmethod def setUpClass(cls): if PROTOCOL_VERSION < 4: - raise SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) + raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) - super(TestDate, cls).setUpClass() sync_table(cls.DateTest) @classmethod def tearDownClass(cls): - super(TestDate, cls).tearDownClass() drop_table(cls.DateTest) def test_date_io(self): @@ -195,12 +191,10 @@ class 
DecimalTest(Model): @classmethod def setUpClass(cls): - super(TestDecimal, cls).setUpClass() sync_table(cls.DecimalTest) @classmethod def tearDownClass(cls): - super(TestDecimal, cls).tearDownClass() drop_table(cls.DecimalTest) def test_decimal_io(self): @@ -220,12 +214,10 @@ class UUIDTest(Model): @classmethod def setUpClass(cls): - super(TestUUID, cls).setUpClass() sync_table(cls.UUIDTest) @classmethod def tearDownClass(cls): - super(TestUUID, cls).tearDownClass() drop_table(cls.UUIDTest) def test_uuid_str_with_dashes(self): @@ -255,12 +247,10 @@ class TimeUUIDTest(Model): @classmethod def setUpClass(cls): - super(TestTimeUUID, cls).setUpClass() sync_table(cls.TimeUUIDTest) @classmethod def tearDownClass(cls): - super(TestTimeUUID, cls).tearDownClass() drop_table(cls.TimeUUIDTest) def test_timeuuid_io(self): @@ -361,10 +351,10 @@ def test_extra_field(self): drop_table(self.TestModel) sync_table(self.TestModel) self.TestModel.create() - execute("ALTER TABLE {} add blah int".format(self.TestModel.column_family_name(include_keyspace=True))) + execute("ALTER TABLE {0} add blah int".format(self.TestModel.column_family_name(include_keyspace=True))) self.TestModel.objects().all() -class TestTimeUUIDFromDatetime(TestCase): +class TestTimeUUIDFromDatetime(BaseCassEngTestCase): def test_conversion_specific_date(self): dt = datetime(1981, 7, 11, microsecond=555000) diff --git a/tests/integration/cqlengine/columns/test_value_io.py b/tests/integration/cqlengine/columns/test_value_io.py index 243f7096ad..3a13b5a4e0 100644 --- a/tests/integration/cqlengine/columns/test_value_io.py +++ b/tests/integration/cqlengine/columns/test_value_io.py @@ -11,12 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+try: + import unittest2 as unittest +except ImportError: + import unittest # noqa from datetime import datetime, timedelta, time from decimal import Decimal from uuid import uuid1, uuid4, UUID import six -import unittest from cassandra.cqlengine import columns from cassandra.cqlengine.management import sync_table @@ -58,7 +61,7 @@ def setUpClass(cls): # create a table with the given column class IOTestModel(Model): - table_name = cls.column.db_type + "_io_test_model_{}".format(uuid4().hex[:8]) + table_name = cls.column.db_type + "_io_test_model_{0}".format(uuid4().hex[:8]) pkey = cls.column(primary_key=True) data = cls.column() @@ -150,22 +153,6 @@ class TestDateTime(BaseColumnIOTest): data_val = now + timedelta(days=1) -class TestDate(BaseColumnIOTest): - - @classmethod - def setUpClass(cls): - if PROTOCOL_VERSION < 4: - raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) - - super(TestDate, cls).setUpClass() - - column = columns.Date - - now = Date(datetime.now().date()) - pkey_val = now - data_val = Date(now.days_from_epoch + 1) - - class TestUUID(BaseColumnIOTest): column = columns.UUID @@ -218,16 +205,43 @@ class TestDecimalIO(BaseColumnIOTest): data_val = Decimal('0.005'), 3.5, '8' def comparator_converter(self, val): - return Decimal(val) + return Decimal(repr(val) if isinstance(val, float) else val) -class TestTime(BaseColumnIOTest): + +class ProtocolV4Test(BaseColumnIOTest): @classmethod def setUpClass(cls): + if PROTOCOL_VERSION >= 4: + super(ProtocolV4Test, cls).setUpClass() + + @classmethod + def tearDownClass(cls): + if PROTOCOL_VERSION >= 4: + super(ProtocolV4Test, cls).tearDownClass() + +class TestDate(ProtocolV4Test): + + def setUp(self): + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) + + super(TestDate, self).setUp() + + column = columns.Date + + now = 
Date(datetime.now().date()) + pkey_val = now + data_val = Date(now.days_from_epoch + 1) + + +class TestTime(ProtocolV4Test): + + def setUp(self): if PROTOCOL_VERSION < 4: raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) - super(TestTime, cls).setUpClass() + super(TestTime, self).setUp() column = columns.Time @@ -235,14 +249,13 @@ def setUpClass(cls): data_val = Time(time(16, 47, 25, 7)) -class TestSmallInt(BaseColumnIOTest): +class TestSmallInt(ProtocolV4Test): - @classmethod - def setUpClass(cls): + def setUp(self): if PROTOCOL_VERSION < 4: raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) - super(TestSmallInt, cls).setUpClass() + super(TestSmallInt, self).setUp() column = columns.SmallInt @@ -250,14 +263,13 @@ def setUpClass(cls): data_val = 32523 -class TestTinyInt(BaseColumnIOTest): +class TestTinyInt(ProtocolV4Test): - @classmethod - def setUpClass(cls): + def setUp(self): if PROTOCOL_VERSION < 4: raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) - super(TestTinyInt, cls).setUpClass() + super(TestTinyInt, self).setUp() column = columns.TinyInt diff --git a/tests/integration/cqlengine/connections/__init__.py b/tests/integration/cqlengine/connections/__init__.py deleted file mode 100644 index 55181d93fe..0000000000 --- a/tests/integration/cqlengine/connections/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2015 DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index 0efd91570d..df443bf86f 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -36,10 +36,11 @@ def assert_option_fails(self, key): # key is a normal_key, converted to # __compaction_key__ - key = "__compaction_{}__".format(key) + key = "__compaction_{0}__".format(key) - with patch.object(self.model, key, 10), self.assertRaises(CQLEngineException): - get_compaction_options(self.model) + with patch.object(self.model, key, 10): + with self.assertRaises(CQLEngineException): + get_compaction_options(self.model) class SizeTieredCompactionTest(BaseCompactionTest): diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 99b34c5cf0..1601f318c8 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -11,10 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+try: + import unittest2 as unittest +except ImportError: + import unittest # noqa import mock - -from unittest import skipUnless import warnings from cassandra.cqlengine import CACHING_ALL, CACHING_NONE @@ -310,7 +312,7 @@ def test_failure(self): sync_table(self.FakeModel) -@skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") +@unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_static_columns(): class StaticModel(Model): id = columns.Integer(primary_key=True) diff --git a/tests/integration/cqlengine/model/test_equality_operations.py b/tests/integration/cqlengine/model/test_equality_operations.py index 0c098deeac..8ed38327ef 100644 --- a/tests/integration/cqlengine/model/test_equality_operations.py +++ b/tests/integration/cqlengine/model/test_equality_operations.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from unittest import skip from uuid import uuid4 from tests.integration.cqlengine.base import BaseCassEngTestCase diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index 0ac81c35e6..b66e284fae 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -11,15 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- -from unittest import TestCase +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa from cassandra.cqlengine import columns from cassandra.cqlengine.management import sync_table, drop_table, create_keyspace_simple, drop_keyspace from cassandra.cqlengine.models import Model, ModelDefinitionException -class TestModel(TestCase): +class TestModel(unittest.TestCase): """ Tests the non-io functionality of models """ def test_instance_equality(self): @@ -104,7 +106,7 @@ class table(Model): drop_keyspace('keyspace') -class BuiltInAttributeConflictTest(TestCase): +class BuiltInAttributeConflictTest(unittest.TestCase): """tests Model definitions that conflict with built-in attributes/methods""" def test_model_with_attribute_name_conflict(self): diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 2033d9dc92..8de0fe6ab0 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -11,10 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa from uuid import uuid4, UUID import random -import unittest from datetime import datetime, date, time from decimal import Decimal from operator import itemgetter diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 2d870d38ed..25c3ccee44 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -11,10 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+try: + import unittest2 as unittest +except ImportError: + import unittest # noqa from datetime import datetime, date, time from decimal import Decimal -import unittest from uuid import UUID, uuid4 from cassandra.cqlengine.models import Model diff --git a/tests/integration/cqlengine/operators/test_base_operator.py b/tests/integration/cqlengine/operators/test_base_operator.py index 9a15f829fa..8df0da7f02 100644 --- a/tests/integration/cqlengine/operators/test_base_operator.py +++ b/tests/integration/cqlengine/operators/test_base_operator.py @@ -12,11 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -from unittest import TestCase +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + from cassandra.cqlengine.operators import BaseQueryOperator, QueryOperatorException -class BaseOperatorTest(TestCase): +class BaseOperatorTest(unittest.TestCase): def test_get_operator_cannot_be_called_from_base_class(self): with self.assertRaises(QueryOperatorException): diff --git a/tests/integration/cqlengine/query/test_batch_query.py b/tests/integration/cqlengine/query/test_batch_query.py index 1247f680b2..d9dc33c22d 100644 --- a/tests/integration/cqlengine/query/test_batch_query.py +++ b/tests/integration/cqlengine/query/test_batch_query.py @@ -116,7 +116,7 @@ def test_bulk_delete_success_case(self): for i in range(1): for j in range(5): - TestMultiKeyModel.create(partition=i, cluster=j, count=i*j, text='{}:{}'.format(i,j)) + TestMultiKeyModel.create(partition=i, cluster=j, count=i*j, text='{0}:{1}'.format(i,j)) with BatchQuery() as b: TestMultiKeyModel.objects.batch(b).filter(partition=0).delete() diff --git a/tests/integration/cqlengine/query/test_datetime_queries.py b/tests/integration/cqlengine/query/test_datetime_queries.py index 936f4fc78b..ebce19f449 100644 --- a/tests/integration/cqlengine/query/test_datetime_queries.py +++ 
b/tests/integration/cqlengine/query/test_datetime_queries.py @@ -14,6 +14,7 @@ from datetime import datetime, timedelta from uuid import uuid4 +from cassandra.cqlengine.functions import get_total_seconds from tests.integration.cqlengine.base import BaseCassEngTestCase @@ -66,6 +67,6 @@ def test_datetime_precision(self): obj = DateTimeQueryTestModel.create(user=pk, day=now, data='energy cheese') load = DateTimeQueryTestModel.get(user=pk) - assert abs(now - load.day).total_seconds() < 0.001 + self.assertAlmostEqual(get_total_seconds(now - load.day), 0, 2) obj.delete() diff --git a/tests/integration/cqlengine/query/test_queryoperators.py b/tests/integration/cqlengine/query/test_queryoperators.py index 4e28090672..f3154de90b 100644 --- a/tests/integration/cqlengine/query/test_queryoperators.py +++ b/tests/integration/cqlengine/query/test_queryoperators.py @@ -99,7 +99,7 @@ class TestModel(Model): q = TestModel.objects.filter(pk__token__gt=func) where = q._where[0] where.set_context_id(1) - self.assertEqual(str(where), 'token("p1", "p2") > token(%({})s, %({})s)'.format(1, 2)) + self.assertEqual(str(where), 'token("p1", "p2") > token(%({0})s, %({1})s)'.format(1, 2)) # Verify that a SELECT query can be successfully generated str(q._select_query()) @@ -111,7 +111,7 @@ class TestModel(Model): q = TestModel.objects.filter(pk__token__gt=func) where = q._where[0] where.set_context_id(1) - self.assertEqual(str(where), 'token("p1", "p2") > token(%({})s, %({})s)'.format(1, 2)) + self.assertEqual(str(where), 'token("p1", "p2") > token(%({0})s, %({1})s)'.format(1, 2)) str(q._select_query()) # The 'pk__token' virtual column may only be compared to a Token diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 8d8610b16b..6d5fc926c5 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -11,11 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from __future__ import absolute_import + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + from datetime import datetime import time -from unittest import TestCase, skipUnless from uuid import uuid1, uuid4 import uuid @@ -685,7 +689,7 @@ def test_objects_property_returns_fresh_queryset(self): assert TestModel.objects._result_cache is None -@skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") +@unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_paged_result_handling(): # addresses #225 class PagingTest(Model): diff --git a/tests/integration/cqlengine/query/test_updates.py b/tests/integration/cqlengine/query/test_updates.py index 799e74d135..a3b80f15f5 100644 --- a/tests/integration/cqlengine/query/test_updates.py +++ b/tests/integration/cqlengine/query/test_updates.py @@ -137,11 +137,11 @@ def test_set_add_updates(self): partition = uuid4() cluster = 1 TestQueryUpdateModel.objects.create( - partition=partition, cluster=cluster, text_set={"foo"}) + partition=partition, cluster=cluster, text_set=set(("foo",))) TestQueryUpdateModel.objects( - partition=partition, cluster=cluster).update(text_set__add={'bar'}) + partition=partition, cluster=cluster).update(text_set__add=set(('bar',))) obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) - self.assertEqual(obj.text_set, {"foo", "bar"}) + self.assertEqual(obj.text_set, set(("foo", "bar"))) def test_set_add_updates_new_record(self): """ If the key doesn't exist yet, an update creates the record @@ -149,20 +149,20 @@ def test_set_add_updates_new_record(self): partition = uuid4() cluster = 1 TestQueryUpdateModel.objects( - partition=partition, cluster=cluster).update(text_set__add={'bar'}) + partition=partition, 
cluster=cluster).update(text_set__add=set(('bar',))) obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) - self.assertEqual(obj.text_set, {"bar"}) + self.assertEqual(obj.text_set, set(("bar",))) def test_set_remove_updates(self): partition = uuid4() cluster = 1 TestQueryUpdateModel.objects.create( - partition=partition, cluster=cluster, text_set={"foo", "baz"}) + partition=partition, cluster=cluster, text_set=set(("foo", "baz"))) TestQueryUpdateModel.objects( partition=partition, cluster=cluster).update( - text_set__remove={'foo'}) + text_set__remove=set(('foo',))) obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) - self.assertEqual(obj.text_set, {"baz"}) + self.assertEqual(obj.text_set, set(("baz",))) def test_set_remove_new_record(self): """ Removing something not in the set should silently do nothing @@ -170,12 +170,12 @@ def test_set_remove_new_record(self): partition = uuid4() cluster = 1 TestQueryUpdateModel.objects.create( - partition=partition, cluster=cluster, text_set={"foo"}) + partition=partition, cluster=cluster, text_set=set(("foo",))) TestQueryUpdateModel.objects( partition=partition, cluster=cluster).update( - text_set__remove={'afsd'}) + text_set__remove=set(('afsd',))) obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster) - self.assertEqual(obj.text_set, {"foo"}) + self.assertEqual(obj.text_set, set(("foo",))) def test_list_append_updates(self): partition = uuid4() diff --git a/tests/integration/cqlengine/statements/test_assignment_clauses.py b/tests/integration/cqlengine/statements/test_assignment_clauses.py index 76d526f225..9874fddcf0 100644 --- a/tests/integration/cqlengine/statements/test_assignment_clauses.py +++ b/tests/integration/cqlengine/statements/test_assignment_clauses.py @@ -11,12 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa -from unittest import TestCase from cassandra.cqlengine.statements import AssignmentClause, SetUpdateClause, ListUpdateClause, MapUpdateClause, MapDeleteClause, FieldDeleteClause, CounterUpdateClause -class AssignmentClauseTests(TestCase): +class AssignmentClauseTests(unittest.TestCase): def test_rendering(self): pass @@ -27,14 +30,14 @@ def test_insert_tuple(self): self.assertEqual(ac.insert_tuple(), ('a', 10)) -class SetUpdateClauseTests(TestCase): +class SetUpdateClauseTests(unittest.TestCase): def test_update_from_none(self): - c = SetUpdateClause('s', {1, 2}, previous=None) + c = SetUpdateClause('s', set((1, 2)), previous=None) c._analyze() c.set_context_id(0) - self.assertEqual(c._assignments, {1, 2}) + self.assertEqual(c._assignments, set((1, 2))) self.assertIsNone(c._additions) self.assertIsNone(c._removals) @@ -43,11 +46,11 @@ def test_update_from_none(self): ctx = {} c.update_context(ctx) - self.assertEqual(ctx, {'0': {1, 2}}) + self.assertEqual(ctx, {'0': set((1, 2))}) def test_null_update(self): """ tests setting a set to None creates an empty update statement """ - c = SetUpdateClause('s', None, previous={1, 2}) + c = SetUpdateClause('s', None, previous=set((1, 2))) c._analyze() c.set_context_id(0) @@ -64,7 +67,7 @@ def test_null_update(self): def test_no_update(self): """ tests an unchanged value creates an empty update statement """ - c = SetUpdateClause('s', {1, 2}, previous={1, 2}) + c = SetUpdateClause('s', set((1, 2)), previous=set((1, 2))) c._analyze() c.set_context_id(0) @@ -95,15 +98,15 @@ def test_update_empty_set(self): ctx = {} c.update_context(ctx) - self.assertEqual(ctx, {'0' : set()}) + self.assertEqual(ctx, {'0': set()}) def test_additions(self): - c = SetUpdateClause('s', {1, 2, 3}, previous={1, 2}) + c = SetUpdateClause('s', set((1, 2, 3)), 
previous=set((1, 2))) c._analyze() c.set_context_id(0) self.assertIsNone(c._assignments) - self.assertEqual(c._additions, {3}) + self.assertEqual(c._additions, set((3,))) self.assertIsNone(c._removals) self.assertEqual(c.get_context_size(), 1) @@ -111,42 +114,42 @@ def test_additions(self): ctx = {} c.update_context(ctx) - self.assertEqual(ctx, {'0': {3}}) + self.assertEqual(ctx, {'0': set((3,))}) def test_removals(self): - c = SetUpdateClause('s', {1, 2}, previous={1, 2, 3}) + c = SetUpdateClause('s', set((1, 2)), previous=set((1, 2, 3))) c._analyze() c.set_context_id(0) self.assertIsNone(c._assignments) self.assertIsNone(c._additions) - self.assertEqual(c._removals, {3}) + self.assertEqual(c._removals, set((3,))) self.assertEqual(c.get_context_size(), 1) self.assertEqual(str(c), '"s" = "s" - %(0)s') ctx = {} c.update_context(ctx) - self.assertEqual(ctx, {'0': {3}}) + self.assertEqual(ctx, {'0': set((3,))}) def test_additions_and_removals(self): - c = SetUpdateClause('s', {2, 3}, previous={1, 2}) + c = SetUpdateClause('s', set((2, 3)), previous=set((1, 2))) c._analyze() c.set_context_id(0) self.assertIsNone(c._assignments) - self.assertEqual(c._additions, {3}) - self.assertEqual(c._removals, {1}) + self.assertEqual(c._additions, set((3,))) + self.assertEqual(c._removals, set((1,))) self.assertEqual(c.get_context_size(), 2) self.assertEqual(str(c), '"s" = "s" + %(0)s, "s" = "s" - %(1)s') ctx = {} c.update_context(ctx) - self.assertEqual(ctx, {'0': {3}, '1': {1}}) + self.assertEqual(ctx, {'0': set((3,)), '1': set((1,))}) -class ListUpdateClauseTests(TestCase): +class ListUpdateClauseTests(unittest.TestCase): def test_update_from_none(self): c = ListUpdateClause('s', [1, 2, 3]) @@ -262,7 +265,7 @@ def test_shrinking_list_update(self): self.assertEqual(ctx, {'0': [1, 2, 3]}) -class MapUpdateTests(TestCase): +class MapUpdateTests(unittest.TestCase): def test_update(self): c = MapUpdateClause('s', {3: 0, 5: 6}, previous={5: 0, 3: 4}) @@ -298,7 +301,7 @@ def 
test_nulled_columns_arent_included(self): self.assertNotIn(1, c._updates) -class CounterUpdateTests(TestCase): +class CounterUpdateTests(unittest.TestCase): def test_positive_update(self): c = CounterUpdateClause('a', 5, 3) @@ -334,7 +337,7 @@ def noop_update(self): self.assertEqual(ctx, {'5': 0}) -class MapDeleteTests(TestCase): +class MapDeleteTests(unittest.TestCase): def test_update(self): c = MapDeleteClause('s', {3: 0}, {1: 2, 3: 4, 5: 6}) @@ -350,7 +353,7 @@ def test_update(self): self.assertEqual(ctx, {'0': 1, '1': 5}) -class FieldDeleteTests(TestCase): +class FieldDeleteTests(unittest.TestCase): def test_str(self): f = FieldDeleteClause("blake") diff --git a/tests/integration/cqlengine/statements/test_assignment_statement.py b/tests/integration/cqlengine/statements/test_assignment_statement.py index 40ba73be36..18f08dd2e8 100644 --- a/tests/integration/cqlengine/statements/test_assignment_statement.py +++ b/tests/integration/cqlengine/statements/test_assignment_statement.py @@ -11,12 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+try: + import unittest2 as unittest +except ImportError: + import unittest # noqa -from unittest import TestCase from cassandra.cqlengine.statements import AssignmentStatement, StatementException -class AssignmentStatementTest(TestCase): +class AssignmentStatementTest(unittest.TestCase): def test_add_assignment_type_checking(self): """ tests that only assignment clauses can be added to queries """ diff --git a/tests/integration/cqlengine/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py index 5d8e6844a4..2d0f30e4c0 100644 --- a/tests/integration/cqlengine/statements/test_base_statement.py +++ b/tests/integration/cqlengine/statements/test_base_statement.py @@ -11,12 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa -from unittest import TestCase from cassandra.cqlengine.statements import BaseCQLStatement, StatementException -class BaseStatementTest(TestCase): +class BaseStatementTest(unittest.TestCase): def test_where_clause_type_checking(self): """ tests that only assignment clauses can be added to queries """ diff --git a/tests/integration/cqlengine/statements/test_insert_statement.py b/tests/integration/cqlengine/statements/test_insert_statement.py index 302e04d80b..75a2e71b86 100644 --- a/tests/integration/cqlengine/statements/test_insert_statement.py +++ b/tests/integration/cqlengine/statements/test_insert_statement.py @@ -11,13 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+try: + import unittest2 as unittest +except ImportError: + import unittest # noqa -from unittest import TestCase from cassandra.cqlengine.statements import InsertStatement, StatementException, AssignmentClause import six -class InsertStatementTests(TestCase): +class InsertStatementTests(unittest.TestCase): def test_where_clause_failure(self): """ tests that where clauses cannot be added to Insert statements """ diff --git a/tests/integration/cqlengine/statements/test_select_statement.py b/tests/integration/cqlengine/statements/test_select_statement.py index b20feab774..4f920ce042 100644 --- a/tests/integration/cqlengine/statements/test_select_statement.py +++ b/tests/integration/cqlengine/statements/test_select_statement.py @@ -11,13 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa -from unittest import TestCase from cassandra.cqlengine.statements import SelectStatement, WhereClause from cassandra.cqlengine.operators import * import six -class SelectStatementTests(TestCase): +class SelectStatementTests(unittest.TestCase): def test_single_field_is_listified(self): """ tests that passing a string field into the constructor puts it into a list """ diff --git a/tests/integration/cqlengine/statements/test_update_statement.py b/tests/integration/cqlengine/statements/test_update_statement.py index 0f77d53ca4..6c7bea6dc5 100644 --- a/tests/integration/cqlengine/statements/test_update_statement.py +++ b/tests/integration/cqlengine/statements/test_update_statement.py @@ -11,9 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+try: + import unittest2 as unittest +except ImportError: + import unittest # noqa -from unittest import TestCase -from cassandra.cqlengine.columns import Set, List from cassandra.cqlengine.operators import * from cassandra.cqlengine.statements import (UpdateStatement, WhereClause, AssignmentClause, SetUpdateClause, @@ -21,7 +23,7 @@ import six -class UpdateStatementTests(TestCase): +class UpdateStatementTests(unittest.TestCase): def test_table_rendering(self): """ tests that fields are properly added to the select statement """ @@ -60,7 +62,7 @@ def test_additional_rendering(self): def test_update_set_add(self): us = UpdateStatement('table') - us.add_assignment_clause(SetUpdateClause('a', {1}, operation='add')) + us.add_assignment_clause(SetUpdateClause('a', set((1,)), operation='add')) self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') def test_update_empty_set_add_does_not_assign(self): diff --git a/tests/integration/cqlengine/statements/test_where_clause.py b/tests/integration/cqlengine/statements/test_where_clause.py index d29bc690e2..542cb1f9f2 100644 --- a/tests/integration/cqlengine/statements/test_where_clause.py +++ b/tests/integration/cqlengine/statements/test_where_clause.py @@ -11,14 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+try: + import unittest2 as unittest +except ImportError: + import unittest # noqa -from unittest import TestCase import six from cassandra.cqlengine.operators import EqualsOperator from cassandra.cqlengine.statements import StatementException, WhereClause -class TestWhereClause(TestCase): +class TestWhereClause(unittest.TestCase): def test_operator_check(self): """ tests that creating a where statement with a non BaseWhereOperator object fails """ diff --git a/tests/integration/cqlengine/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py index 85f3019ac9..a83dd2e942 100644 --- a/tests/integration/cqlengine/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -108,7 +108,7 @@ def test_bulk_delete_success_case(self): for i in range(1): for j in range(5): - TestMultiKeyModel.create(partition=i, cluster=j, count=i*j, text='{}:{}'.format(i,j)) + TestMultiKeyModel.create(partition=i, cluster=j, count=i*j, text='{0}:{1}'.format(i,j)) with BatchQuery() as b: TestMultiKeyModel.objects.batch(b).filter(partition=0).delete() diff --git a/tests/integration/cqlengine/test_ifnotexists.py b/tests/integration/cqlengine/test_ifnotexists.py index 98b192a5d7..6a4387351c 100644 --- a/tests/integration/cqlengine/test_ifnotexists.py +++ b/tests/integration/cqlengine/test_ifnotexists.py @@ -11,9 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+try: + import unittest2 as unittest +except ImportError: + import unittest # noqa import mock -from unittest import skipUnless from uuid import uuid4 from cassandra.cqlengine import columns @@ -71,7 +74,7 @@ def tearDownClass(cls): class IfNotExistsInsertTests(BaseIfNotExistsTest): - @skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") + @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_insert_if_not_exists_success(self): """ tests that insertion with if_not_exists work as expected """ @@ -103,7 +106,7 @@ def test_insert_if_not_exists_failure(self): self.assertEqual(tm.count, 9) self.assertEqual(tm.text, '111111111111') - @skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") + @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_batch_insert_if_not_exists_success(self): """ tests that batch insertion with if_not_exists work as expected """ diff --git a/tests/integration/cqlengine/test_load.py b/tests/integration/cqlengine/test_load.py index 8c8d1d7923..4c62a525f3 100644 --- a/tests/integration/cqlengine/test_load.py +++ b/tests/integration/cqlengine/test_load.py @@ -11,22 +11,26 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+try: + import unittest2 as unittest +except ImportError: + import unittest # noqa import gc import os import resource -from unittest import skipUnless from cassandra.cqlengine import columns from cassandra.cqlengine.models import Model from cassandra.cqlengine.management import sync_table + class LoadTest(Model): k = columns.Integer(primary_key=True) v = columns.Integer() -@skipUnless("LOADTEST" in os.environ, "LOADTEST not on") +@unittest.skipUnless("LOADTEST" in os.environ, "LOADTEST not on") def test_lots_of_queries(): sync_table(LoadTest) import objgraph diff --git a/tests/integration/cqlengine/test_timestamp.py b/tests/integration/cqlengine/test_timestamp.py index 5d37710ce6..dde4334ad6 100644 --- a/tests/integration/cqlengine/test_timestamp.py +++ b/tests/integration/cqlengine/test_timestamp.py @@ -40,8 +40,9 @@ def setUpClass(cls): class BatchTest(BaseTimestampTest): def test_batch_is_included(self): - with mock.patch.object(self.session, "execute") as m, BatchQuery(timestamp=timedelta(seconds=30)) as b: - TestTimestampModel.batch(b).create(count=1) + with mock.patch.object(self.session, "execute") as m: + with BatchQuery(timestamp=timedelta(seconds=30)) as b: + TestTimestampModel.batch(b).create(count=1) "USING TIMESTAMP".should.be.within(m.call_args[0][0].query_string) @@ -49,8 +50,9 @@ def test_batch_is_included(self): class CreateWithTimestampTest(BaseTimestampTest): def test_batch(self): - with mock.patch.object(self.session, "execute") as m, BatchQuery() as b: - TestTimestampModel.timestamp(timedelta(seconds=10)).batch(b).create(count=1) + with mock.patch.object(self.session, "execute") as m: + with BatchQuery() as b: + TestTimestampModel.timestamp(timedelta(seconds=10)).batch(b).create(count=1) query = m.call_args[0][0].query_string @@ -96,8 +98,9 @@ def test_instance_update_includes_timestamp_in_query(self): "USING TIMESTAMP".should.be.within(m.call_args[0][0].query_string) def test_instance_update_in_batch(self): - with 
mock.patch.object(self.session, "execute") as m, BatchQuery() as b: - self.instance.batch(b).timestamp(timedelta(seconds=30)).update(count=2) + with mock.patch.object(self.session, "execute") as m: + with BatchQuery() as b: + self.instance.batch(b).timestamp(timedelta(seconds=30)).update(count=2) query = m.call_args[0][0].query_string "USING TIMESTAMP".should.be.within(query) diff --git a/tests/integration/cqlengine/test_transaction.py b/tests/integration/cqlengine/test_transaction.py index bc64dc8452..eec6c631ef 100644 --- a/tests/integration/cqlengine/test_transaction.py +++ b/tests/integration/cqlengine/test_transaction.py @@ -11,10 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa import mock import six -from unittest import skipUnless from uuid import uuid4 from cassandra.cqlengine import columns @@ -32,7 +35,7 @@ class TestTransactionModel(Model): text = columns.Text(required=False) -@skipUnless(CASSANDRA_VERSION >= '2.0.0', "transactions only supported on cassandra 2.0 or higher") +@unittest.skipUnless(CASSANDRA_VERSION >= '2.0.0', "transactions only supported on cassandra 2.0 or higher") class TestTransaction(BaseCassEngTestCase): @classmethod diff --git a/tox.ini b/tox.ini index d694e7ab03..f608061da4 100644 --- a/tox.ini +++ b/tox.ini @@ -17,11 +17,6 @@ commands = {envpython} setup.py build_ext --inplace nosetests --verbosity=2 tests/unit/ nosetests --verbosity=2 tests/integration/cqlengine -[testenv:py26] -commands = {envpython} setup.py build_ext --inplace - nosetests --verbosity=2 tests/unit/ -# no cqlengine support for 2.6 right now - [testenv:pypy] deps = {[base]deps} commands = {envpython} setup.py build_ext --inplace From 09f258bdba2710a016cf134336adbac77f22d35b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 11 Jun 2015 
17:17:58 -0500 Subject: [PATCH 1511/3726] Make tox only install unittest2 in python2.6 fixes some issues around test skipping caused by unittest2 in Python 2.7+ --- tox.ini | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index f608061da4..23b2aee1f4 100644 --- a/tox.ini +++ b/tox.ini @@ -4,7 +4,6 @@ envlist = py26,py27,pypy,py33,py34 [base] deps = nose mock - unittest2 PyYAML six @@ -17,6 +16,11 @@ commands = {envpython} setup.py build_ext --inplace nosetests --verbosity=2 tests/unit/ nosetests --verbosity=2 tests/integration/cqlengine +[testenv:py26] +deps = {[testenv]deps} + unittest2 +# test skipping is different in unittest2 for python 2.7+; let's just use it where needed + [testenv:pypy] deps = {[base]deps} commands = {envpython} setup.py build_ext --inplace From d3c9d10a4fa8b9fededd6c3f0917887f01b24655 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 11 Jun 2015 17:38:43 -0500 Subject: [PATCH 1512/3726] Make meta use CQL $$ quoting for Function body --- cassandra/metadata.py | 4 ++-- tests/integration/standard/test_metadata.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 843d96f38e..6f8200a8f4 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1118,14 +1118,14 @@ def as_cql_query(self, formatted=False): for n, t in zip(self.argument_names, self.type_signature)]) typ = self.return_type.cql_parameterized_type() lang = self.language - body = protect_value(self.body) + body = self.body on_null = "CALLED" if self.called_on_null_input else "RETURNS NULL" return "CREATE FUNCTION %(keyspace)s.%(name)s(%(arg_list)s)%(sep)s" \ "%(on_null)s ON NULL INPUT%(sep)s" \ "RETURNS %(typ)s%(sep)s" \ "LANGUAGE %(lang)s%(sep)s" \ - "AS %(body)s;" % locals() + "AS $$%(body)s$$;" % locals() @property def signature(self): diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 
2bb88b50cf..cd7def5a97 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1358,10 +1358,11 @@ def make_function_kwargs(self, called_on_null=True): def test_functions_after_udt(self): """ - Test to to ensure udt's come after functions in in keyspace dump + Test to to ensure functions come after UDTs in in keyspace dump test_functions_after_udt creates a basic function. Then queries that function and make sure that in the results that UDT's are listed before any corresponding functions, when we dump the keyspace + Ideally we would make a function that takes a udt type, but this presently fails because C* c059a56 requires udt to be frozen to create, but does not store meta indicating frozen SEE https://issues.apache.org/jira/browse/CASSANDRA-9186 From b941b1a2f56f3c694ed27d164949e530b60c37a8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 11 Jun 2015 17:42:50 -0500 Subject: [PATCH 1513/3726] cqle: don't depend on iter order for model attr access test --- tests/integration/cqlengine/model/test_model_io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 8de0fe6ab0..574d072dc4 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -78,7 +78,7 @@ def test_model_read_as_dict(self): } self.assertEqual(sorted(tm.keys()), sorted(column_dict.keys())) - self.assertItemsEqual(tm.values(), column_dict.values()) + self.assertItemsEqual(sorted(tm.values()), sorted(column_dict.values())) self.assertEqual( sorted(tm.items(), key=itemgetter(0)), sorted(column_dict.items(), key=itemgetter(0))) From d101d3ce60a0abe77cccba3fc5cde42f640e23c7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 12 Jun 2015 10:02:19 -0500 Subject: [PATCH 1514/3726] cqle test: improve value comparison for reading model values --- 
tests/integration/cqlengine/model/test_model_io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 574d072dc4..35d7b81fee 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -78,7 +78,7 @@ def test_model_read_as_dict(self): } self.assertEqual(sorted(tm.keys()), sorted(column_dict.keys())) - self.assertItemsEqual(sorted(tm.values()), sorted(column_dict.values())) + self.assertSetEqual(set(tm.values()), set(column_dict.values())) self.assertEqual( sorted(tm.items(), key=itemgetter(0)), sorted(column_dict.items(), key=itemgetter(0))) From 5ea3f2242fc5af974d4fe119e04f05c4ff8e2ace Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 12 Jun 2015 10:28:00 -0500 Subject: [PATCH 1515/3726] doc: add note about not blocking in future callbacks --- cassandra/cluster.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 834872398b..1c9b6fabba 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3305,6 +3305,11 @@ def add_callback(self, fn, *args, **kwargs): If the final result has already been seen when this method is called, the callback will be called immediately (before this method returns). + Note: in the case that the result is not available when the callback is added, + the callback is executed by IO event thread. This means that the callback + should not block or attempt further synchronous requests, because no further + IO will be processed until the callback returns. + **Important**: if the callback you attach results in an exception being raised, **the exception will be ignored**, so please ensure your callback handles all error cases that you care about. 
From f2403d0380cee0a0b670d26784b055c392aa6dca Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 12 Jun 2015 10:40:44 -0500 Subject: [PATCH 1516/3726] cqle doc: deprecated TimeUUID.from_datetime --- cassandra/cqlengine/columns.py | 7 ++++++- docs/api/cassandra/cqlengine/columns.rst | 2 ++ docs/cqlengine/upgrade_guide.rst | 3 +++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 8f8a6e2546..185cccfbbc 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -557,7 +557,12 @@ def from_datetime(self, dt): :param dt: datetime :type dt: datetime - :return: + :return: uuid1 + + .. deprecated:: 2.6.0 + + Use :func:`cassandra.util.uuid_from_time` + """ msg = "cqlengine.columns.TimeUUID.from_datetime is deprecated. Use cassandra.util.uuid_from_time instead." warnings.warn(msg, DeprecationWarning) diff --git a/docs/api/cassandra/cqlengine/columns.rst b/docs/api/cassandra/cqlengine/columns.rst index 7c4695d323..918ff65fa6 100644 --- a/docs/api/cassandra/cqlengine/columns.rst +++ b/docs/api/cassandra/cqlengine/columns.rst @@ -78,6 +78,8 @@ Columns of all types are initialized by passing :class:`.Column` attributes to t .. autoclass:: TimeUUID(**kwargs) + .. automethod:: from_datetime + .. autoclass:: TinyInt(**kwargs) .. autoclass:: UserDefinedType diff --git a/docs/cqlengine/upgrade_guide.rst b/docs/cqlengine/upgrade_guide.rst index 9af5876426..ee524cc7f8 100644 --- a/docs/cqlengine/upgrade_guide.rst +++ b/docs/cqlengine/upgrade_guide.rst @@ -150,3 +150,6 @@ After:: __discriminator_value__ = 'dog' +TimeUUID.from_datetime +---------------------- +This function is deprecated in favor of the core utility function :func:`~.uuid_from_time`. 
From fa1cb6809377f9f41643d4e44fb986ef94f2c692 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Fri, 12 Jun 2015 16:16:19 -0700 Subject: [PATCH 1517/3726] Windows testing support and stabilizations --- tests/integration/__init__.py | 117 +++++++++++------- tests/integration/cqlengine/__init__.py | 2 +- .../cqlengine/columns/test_validation.py | 2 +- .../integration/cqlengine/model/test_udts.py | 2 +- .../cqlengine/query/test_queryset.py | 7 +- tests/integration/long/test_consistency.py | 22 ++-- tests/integration/long/test_failure_types.py | 41 +++--- tests/integration/long/test_ipv6.py | 68 ++++++---- .../long/test_loadbalancingpolicies.py | 32 ++++- tests/integration/long/test_schema.py | 81 +++++------- tests/integration/long/utils.py | 12 +- tests/integration/standard/test_cluster.py | 14 +-- tests/integration/standard/test_concurrent.py | 56 ++++++--- tests/integration/standard/test_metadata.py | 36 ++---- tests/integration/standard/test_metrics.py | 10 +- tests/integration/standard/test_query.py | 6 + tests/integration/standard/test_types.py | 9 +- tests/integration/standard/test_udts.py | 10 +- .../cqlengine => stress_tests}/test_load.py | 40 +++--- .../test_multi_inserts.py | 20 ++- tests/unit/io/test_asyncorereactor.py | 8 +- tests/unit/io/test_libevreactor.py | 2 +- tests/unit/test_time_util.py | 4 +- 23 files changed, 347 insertions(+), 254 deletions(-) rename tests/{integration/cqlengine => stress_tests}/test_load.py (52%) rename tests/{integration/long => stress_tests}/test_multi_inserts.py (76%) mode change 100755 => 100644 diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 1f43d6d652..3568b1ffc6 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -12,25 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import time -import traceback - try: import unittest2 as unittest except ImportError: import unittest # noqa -import logging -log = logging.getLogger(__name__) - -import os +import os, six, time, sys, logging, traceback from threading import Event -import six from subprocess import call - from itertools import groupby +from cassandra import OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure from cassandra.cluster import Cluster +from cassandra.protocol import ConfigurationException try: from ccmlib.cluster import Cluster as CCMCluster @@ -39,6 +33,8 @@ except ImportError as e: CCMClusterFactory = None +log = logging.getLogger(__name__) + CLUSTER_NAME = 'test_cluster' SINGLE_NODE_CLUSTER_NAME = 'single_node' MULTIDC_CLUSTER_NAME = 'multidc_test_cluster' @@ -84,7 +80,7 @@ def _tuple_version(version_string): USE_CASS_EXTERNAL = bool(os.getenv('USE_CASS_EXTERNAL', False)) -default_cassandra_version = '2.1.3' +default_cassandra_version = '2.1.5' if USE_CASS_EXTERNAL: if CCMClusterFactory: @@ -157,10 +153,21 @@ def remove_cluster(): global CCM_CLUSTER if CCM_CLUSTER: - log.debug("removing cluster %s", CCM_CLUSTER.name) - CCM_CLUSTER.remove() - CCM_CLUSTER = None - + log.debug("Removing cluster {0}".format(CCM_CLUSTER.name)) + tries = 0 + while tries < 100: + try: + CCM_CLUSTER.remove() + CCM_CLUSTER = None + return + except WindowsError: + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + tries += 1 + time.sleep(1) + + raise RuntimeError("Failed to remove cluster after 100 attempts") def is_current_cluster(cluster_name, node_counts): global CCM_CLUSTER @@ -175,49 +182,55 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True): global CCM_CLUSTER if USE_CASS_EXTERNAL: if CCM_CLUSTER: - log.debug("Using external ccm cluster %s", CCM_CLUSTER.name) + log.debug("Using external CCM cluster {0}".format(CCM_CLUSTER.name)) else: log.debug("Using unnamed external 
cluster") return if is_current_cluster(cluster_name, nodes): - log.debug("Using existing cluster %s", cluster_name) - return - - if CCM_CLUSTER: - log.debug("Stopping cluster %s", CCM_CLUSTER.name) - CCM_CLUSTER.stop() + log.debug("Using existing cluster, matching topology: {0}".format(cluster_name)) + else: + if CCM_CLUSTER: + log.debug("Stopping existing cluster, topology mismatch: {0}".format(CCM_CLUSTER.name)) + CCM_CLUSTER.stop() - try: try: - cluster = CCMClusterFactory.load(path, cluster_name) - log.debug("Found existing ccm %s cluster; clearing", cluster_name) - cluster.clear() - cluster.set_install_dir(**CCM_KWARGS) + CCM_CLUSTER = CCMClusterFactory.load(path, cluster_name) + log.debug("Found existing CCM cluster, {0}; clearing.".format(cluster_name)) + CCM_CLUSTER.clear() + CCM_CLUSTER.set_install_dir(**CCM_KWARGS) except Exception: - log.debug("Creating new ccm %s cluster with %s", cluster_name, CCM_KWARGS) - cluster = CCMCluster(path, cluster_name, **CCM_KWARGS) - cluster.set_configuration_options({'start_native_transport': True}) + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + + log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, CCM_KWARGS)) + CCM_CLUSTER = CCMCluster(path, cluster_name, **CCM_KWARGS) + CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) if CASSANDRA_VERSION >= '2.2': - cluster.set_configuration_options({'enable_user_defined_functions': True}) + CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True}) common.switch_cluster(path, cluster_name) - cluster.populate(nodes, ipformat=ipformat) - + CCM_CLUSTER.populate(nodes, ipformat=ipformat) + try: jvm_args = [] # This will enable the Mirroring query handler which will echo our custom payload k,v pairs back if PROTOCOL_VERSION >= 4: jvm_args = [" 
-Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler"] if start: - log.debug("Starting ccm %s cluster", cluster_name) - cluster.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=jvm_args) + log.debug("Starting CCM cluster: {0}".format(cluster_name)) + CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=jvm_args) setup_keyspace(ipformat=ipformat) - - CCM_CLUSTER = cluster except Exception: - log.exception("Failed to start ccm cluster. Removing cluster.") + log.exception("Failed to start CCM cluster; removing cluster.") + + if os.name == "nt": + if CCM_CLUSTER: + for node in CCM_CLUSTER.nodes.itervalues(): + os.system("taskkill /F /PID " + str(node.pid)) + else: + call(["pkill", "-9", "-f", ".ccm"]) remove_cluster() - call(["pkill", "-9", "-f", ".ccm"]) raise @@ -240,6 +253,22 @@ def teardown_package(): log.warning('Did not find cluster: %s' % cluster_name) +def execute_until_pass(session, query): + tries = 0 + while tries < 100: + try: + return session.execute(query) + except ConfigurationException: + # keyspace/table was already created/dropped + return + except (OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure): + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + tries += 1 + + raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query)) + def setup_keyspace(ipformat=None): # wait for nodes to startup time.sleep(10) @@ -251,32 +280,32 @@ def setup_keyspace(ipformat=None): session = cluster.connect() try: - results = session.execute("SELECT keyspace_name FROM system.schema_keyspaces") + results = execute_until_pass(session, "SELECT keyspace_name FROM system.schema_keyspaces") existing_keyspaces = [row[0] for row in results] for ksname in ('test1rf', 'test2rf', 'test3rf'): if ksname in existing_keyspaces: - session.execute("DROP KEYSPACE %s" % 
ksname) + execute_until_pass(session, "DROP KEYSPACE %s" % ksname) ddl = ''' CREATE KEYSPACE test3rf WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'}''' - session.execute(ddl) + execute_until_pass(session, ddl) ddl = ''' CREATE KEYSPACE test2rf WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '2'}''' - session.execute(ddl) + execute_until_pass(session, ddl) ddl = ''' CREATE KEYSPACE test1rf WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}''' - session.execute(ddl) + execute_until_pass(session, ddl) ddl = ''' CREATE TABLE test3rf.test ( k int PRIMARY KEY, v int )''' - session.execute(ddl) + execute_until_pass(session, ddl) except Exception: traceback.print_exc() diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index f0322c6bc8..c53884fcb2 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -28,7 +28,7 @@ def setup_package(): use_single_node() keyspace = 'cqlengine_test' - connection.setup(['localhost'], + connection.setup(['127.0.0.1'], protocol_version=PROTOCOL_VERSION, default_keyspace=keyspace) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 5426548e5e..b645755574 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -358,7 +358,7 @@ class TestTimeUUIDFromDatetime(BaseCassEngTestCase): def test_conversion_specific_date(self): dt = datetime(1981, 7, 11, microsecond=555000) - uuid = TimeUUID.from_datetime(dt) + uuid = util.uuid_from_time(dt) from uuid import UUID assert isinstance(uuid, UUID) diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 25c3ccee44..86cc4596d8 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ 
-26,7 +26,7 @@ from cassandra.cqlengine.management import sync_table, sync_type, create_keyspace_simple, drop_keyspace from cassandra.util import Date, Time -from tests.integration import get_server_versions, PROTOCOL_VERSION +from tests.integration import PROTOCOL_VERSION from tests.integration.cqlengine.base import BaseCassEngTestCase diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 6d5fc926c5..cbaea21f2d 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -37,6 +37,7 @@ from cassandra.cqlengine import statements from cassandra.cqlengine import operators +from cassandra.util import uuid_from_time from cassandra.cqlengine.connection import get_session from tests.integration import PROTOCOL_VERSION @@ -582,17 +583,17 @@ def test_tzaware_datetime_support(self): TimeUUIDQueryModel.create( partition=pk, - time=columns.TimeUUID.from_datetime(midpoint_utc - timedelta(minutes=1)), + time=uuid_from_time(midpoint_utc - timedelta(minutes=1)), data='1') TimeUUIDQueryModel.create( partition=pk, - time=columns.TimeUUID.from_datetime(midpoint_utc), + time=uuid_from_time(midpoint_utc), data='2') TimeUUIDQueryModel.create( partition=pk, - time=columns.TimeUUID.from_datetime(midpoint_utc + timedelta(minutes=1)), + time=uuid_from_time(midpoint_utc + timedelta(minutes=1)), data='3') assert ['1', '2'] == [o.data for o in TimeUUIDQueryModel.filter( diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index db752cc4bd..1c860e9930 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import struct, logging, sys, traceback, time +import struct, time, traceback, sys, logging from cassandra import ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, Unavailable from cassandra.cluster import Cluster from cassandra.policies import TokenAwarePolicy, RoundRobinPolicy, DowngradingConsistencyRetryPolicy from cassandra.query import SimpleStatement -from tests.integration import use_singledc, PROTOCOL_VERSION +from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass from tests.integration.long.utils import (force_stop, create_schema, wait_for_down, wait_for_up, start, CoordinatorStats) @@ -28,8 +28,6 @@ except ImportError: import unittest # noqa -log = logging.getLogger(__name__) - ALL_CONSISTENCY_LEVELS = set([ ConsistencyLevel.ANY, ConsistencyLevel.ONE, ConsistencyLevel.TWO, ConsistencyLevel.QUORUM, ConsistencyLevel.THREE, @@ -41,6 +39,8 @@ SINGLE_DC_CONSISTENCY_LEVELS = ALL_CONSISTENCY_LEVELS - MULTI_DC_CONSISTENCY_LEVELS +log = logging.getLogger(__name__) + def setup_module(): use_singledc() @@ -65,15 +65,7 @@ def _insert(self, session, keyspace, count, consistency_level=ConsistencyLevel.O for i in range(count): ss = SimpleStatement('INSERT INTO cf(k, i) VALUES (0, 0)', consistency_level=consistency_level) - while True: - try: - session.execute(ss) - break - except (OperationTimedOut, WriteTimeout): - ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) - del tb - time.sleep(1) + execute_until_pass(session, ss) def _query(self, session, keyspace, count, consistency_level=ConsistencyLevel.ONE): routing_key = struct.pack('>i', 0) @@ -81,7 +73,10 @@ def _query(self, session, keyspace, count, consistency_level=ConsistencyLevel.ON ss = SimpleStatement('SELECT * FROM cf WHERE k = 0', consistency_level=consistency_level, routing_key=routing_key) + tries = 0 while True: + if tries > 100: + raise RuntimeError("Failed to execute query after 100 attempts: 
{0}".format(ss)) try: self.coordinator_stats.add_coordinator(session.execute_async(ss)) break @@ -89,6 +84,7 @@ def _query(self, session, keyspace, count, consistency_level=ConsistencyLevel.ON ex_type, ex, tb = sys.exc_info() log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb + tries += 1 time.sleep(1) def _assert_writes_succeed(self, session, keyspace, consistency_levels): diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index a927dc95f9..64d06f46a3 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -12,19 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. +import sys, logging, traceback + +from cassandra import ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, ReadFailure, WriteFailure,\ + FunctionFailure +from cassandra.cluster import Cluster +from cassandra.concurrent import execute_concurrent_with_args +from cassandra.query import SimpleStatement +from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace, remove_cluster try: import unittest2 as unittest except ImportError: import unittest - -from cassandra.cluster import Cluster -from cassandra import ConsistencyLevel -from cassandra import WriteFailure, ReadFailure, FunctionFailure -from cassandra.concurrent import execute_concurrent_with_args -from cassandra.query import SimpleStatement -from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace +log = logging.getLogger(__name__) def setup_module(): @@ -48,14 +50,10 @@ def setup_module(): def teardown_module(): """ The rest of the tests don't need custom tombstones - reset the config options so as to not mess with other tests. + remove the cluster so as to not interfere with other tests. 
""" if PROTOCOL_VERSION >= 4: - ccm_cluster = get_cluster() - config_options = {} - ccm_cluster.set_configuration_options(config_options) - if ccm_cluster is not None: - ccm_cluster.stop() + remove_cluster() class ClientExceptionTests(unittest.TestCase): @@ -83,6 +81,19 @@ def tearDown(self): # Restart the nodes to fully functional again self.setFailingNodes(failing_nodes, "testksfail") + def execute_concurrent_args_helper(self, session, query, params): + tries = 0 + while tries < 100: + try: + return execute_concurrent_with_args(session, query, params, concurrency=50) + except (ReadTimeout, WriteTimeout, OperationTimedOut, ReadFailure, WriteFailure): + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + tries += 1 + + raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query)) + def setFailingNodes(self, failing_nodes, keyspace): """ This method will take in a set of failing nodes, and toggle all of the nodes in the provided list to fail @@ -210,11 +221,11 @@ def test_tombstone_overflow_read_failure(self): statement = self.session.prepare("INSERT INTO test3rf.test2 (k, v0,v1) VALUES (1,?,1)") parameters = [(x,) for x in range(3000)] - execute_concurrent_with_args(self.session, statement, parameters, concurrency=50) + self.execute_concurrent_args_helper(self.session, statement, parameters) statement = self.session.prepare("DELETE v1 FROM test3rf.test2 WHERE k = 1 AND v0 =?") parameters = [(x,) for x in range(2001)] - execute_concurrent_with_args(self.session, statement, parameters, concurrency=50) + self.execute_concurrent_args_helper(self.session, statement, parameters) self._perform_cql_statement( """ diff --git a/tests/integration/long/test_ipv6.py b/tests/integration/long/test_ipv6.py index 3f416561ad..e17f22aa07 100644 --- a/tests/integration/long/test_ipv6.py +++ b/tests/integration/long/test_ipv6.py @@ -12,27 +12,29 @@ # See the License for the 
specific language governing permissions and # limitations under the License. -import logging -import os -import socket +import os, socket +from ccmlib import common from cassandra.cluster import Cluster, NoHostAvailable -from ccmlib import common -from tests.integration import use_cluster, remove_cluster, PROTOCOL_VERSION from cassandra.io.asyncorereactor import AsyncoreConnection -try: - from cassandra.io.libevreactor import LibevConnection -except ImportError: - LibevConnection = None +from tests import is_monkey_patched +from tests.integration import use_cluster, remove_cluster, PROTOCOL_VERSION +if is_monkey_patched(): + LibevConnection = -1 + AsyncoreConnection = -1 +else: + try: + from cassandra.io.libevreactor import LibevConnection + except ImportError: + LibevConnection = None try: import unittest2 as unittest except ImportError: import unittest # noqa -log = logging.getLogger(__name__) # If more modules do IPV6 testing, this can be moved down to integration.__init__. # For now, just keeping the clutter here @@ -40,12 +42,12 @@ def setup_module(module): - validate_ccm_viable() - validate_host_viable() - # We use a dedicated cluster (instead of common singledc, as in other tests) because - # it's most likely that the test host will only have one local ipv6 address (::1) - # singledc has three - use_cluster(IPV6_CLUSTER_NAME, [1], ipformat='::%d') + if os.name != "nt": + validate_host_viable() + # We use a dedicated cluster (instead of common singledc, as in other tests) because + # it's most likely that the test host will only have one local ipv6 address (::1) + # singledc has three + use_cluster(IPV6_CLUSTER_NAME, [1], ipformat='::%d') def teardown_module(): @@ -73,7 +75,8 @@ class IPV6ConnectionTest(object): connection_class = None def test_connect(self): - cluster = Cluster(connection_class=self.connection_class, contact_points=['::1'], protocol_version=PROTOCOL_VERSION) + cluster = Cluster(connection_class=self.connection_class, contact_points=['::1'], 
connect_timeout=10, + protocol_version=PROTOCOL_VERSION) session = cluster.connect() future = session.execute_async("SELECT * FROM system.local") future.result() @@ -81,26 +84,41 @@ def test_connect(self): cluster.shutdown() def test_error(self): - cluster = Cluster(connection_class=self.connection_class, contact_points=['::1'], port=9043, protocol_version=PROTOCOL_VERSION) - self.assertRaisesRegexp(NoHostAvailable, '\(\'Unable to connect.*%s.*::1\', 9043.*Connection refused.*' % os.errno.ECONNREFUSED, cluster.connect) + cluster = Cluster(connection_class=self.connection_class, contact_points=['::1'], port=9043, + connect_timeout=10, protocol_version=PROTOCOL_VERSION) + self.assertRaisesRegexp(NoHostAvailable, '\(\'Unable to connect.*%s.*::1\', 9043.*Connection refused.*' + % os.errno.ECONNREFUSED, cluster.connect) def test_error_multiple(self): if len(socket.getaddrinfo('localhost', 9043, socket.AF_UNSPEC, socket.SOCK_STREAM)) < 2: raise unittest.SkipTest('localhost only resolves one address') - cluster = Cluster(connection_class=self.connection_class, contact_points=['localhost'], port=9043, protocol_version=PROTOCOL_VERSION) - self.assertRaisesRegexp(NoHostAvailable, '\(\'Unable to connect.*Tried connecting to \[\(.*\(.*\].*Last error', cluster.connect) + cluster = Cluster(connection_class=self.connection_class, contact_points=['localhost'], port=9043, + connect_timeout=10, protocol_version=PROTOCOL_VERSION) + self.assertRaisesRegexp(NoHostAvailable, '\(\'Unable to connect.*Tried connecting to \[\(.*\(.*\].*Last error', + cluster.connect) class LibevConnectionTests(IPV6ConnectionTest, unittest.TestCase): connection_class = LibevConnection - @classmethod - def setup_class(cls): - if LibevConnection is None: - raise unittest.SkipTest('libev does not appear to be installed properly') + def setUp(self): + if os.name == "nt": + raise unittest.SkipTest("IPv6 is currently not supported under Windows") + + if LibevConnection == -1: + raise unittest.SkipTest("Can't test 
libev with monkey patching") + elif LibevConnection is None: + raise unittest.SkipTest("Libev does not appear to be installed properly") class AsyncoreConnectionTests(IPV6ConnectionTest, unittest.TestCase): connection_class = AsyncoreConnection + + def setUp(self): + if os.name == "nt": + raise unittest.SkipTest("IPv6 is currently not supported under Windows") + + if AsyncoreConnection == -1: + raise unittest.SkipTest("Can't test asyncore with monkey patching") \ No newline at end of file diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 7b2b87957d..465f9d763d 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -14,7 +14,8 @@ import struct, time, logging, sys, traceback -from cassandra import ConsistencyLevel, Unavailable, OperationTimedOut, ReadTimeout +from cassandra import ConsistencyLevel, Unavailable, OperationTimedOut, ReadTimeout, ReadFailure, \ + WriteTimeout, WriteFailure from cassandra.cluster import Cluster, NoHostAvailable from cassandra.concurrent import execute_concurrent_with_args from cassandra.metadata import murmur3 @@ -50,9 +51,20 @@ def teardown_class(cls): def _insert(self, session, keyspace, count=12, consistency_level=ConsistencyLevel.ONE): session.execute('USE %s' % keyspace) - ss = SimpleStatement('INSERT INTO cf(k, i) VALUES (0, 0)', - consistency_level=consistency_level) - execute_concurrent_with_args(session, ss, [None] * count) + ss = SimpleStatement('INSERT INTO cf(k, i) VALUES (0, 0)', consistency_level=consistency_level) + + tries = 0 + while tries < 100: + try: + execute_concurrent_with_args(session, ss, [None] * count) + return + except (OperationTimedOut, WriteTimeout, WriteFailure): + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + tries += 1 + + raise RuntimeError("Failed to execute query 
after 100 attempts: {0}".format(ss)) def _query(self, session, keyspace, count=12, consistency_level=ConsistencyLevel.ONE, use_prepared=False): @@ -62,28 +74,36 @@ def _query(self, session, keyspace, count=12, self.prepared = session.prepare(query_string) for i in range(count): + tries = 0 while True: + if tries > 100: + raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(self.prepared)) try: self.coordinator_stats.add_coordinator(session.execute_async(self.prepared.bind((0,)))) break - except (OperationTimedOut, ReadTimeout): + except (OperationTimedOut, ReadTimeout, ReadFailure): ex_type, ex, tb = sys.exc_info() log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb + tries += 1 else: routing_key = struct.pack('>i', 0) for i in range(count): ss = SimpleStatement('SELECT * FROM %s.cf WHERE k = 0' % keyspace, consistency_level=consistency_level, routing_key=routing_key) + tries = 0 while True: + if tries > 100: + raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(ss)) try: self.coordinator_stats.add_coordinator(session.execute_async(ss)) break - except (OperationTimedOut, ReadTimeout): + except (OperationTimedOut, ReadTimeout, ReadFailure): ex_type, ex, tb = sys.exc_info() log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) del tb + tries += 1 def test_token_aware_is_used_by_default(self): """ diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index 806726cf32..7da5203fd6 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import logging, sys, traceback +import logging -from cassandra import ConsistencyLevel, OperationTimedOut +from cassandra import ConsistencyLevel, AlreadyExists from cassandra.cluster import Cluster -from cassandra.protocol import ConfigurationException from cassandra.query import SimpleStatement -from tests.integration import use_singledc, PROTOCOL_VERSION + +from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass try: import unittest2 as unittest @@ -54,29 +54,29 @@ def test_recreates(self): for keyspace_number in range(5): keyspace = "ks_{0}".format(keyspace_number) - results = session.execute("SELECT keyspace_name FROM system.schema_keyspaces") + results = execute_until_pass(session, "SELECT keyspace_name FROM system.schema_keyspaces") existing_keyspaces = [row[0] for row in results] if keyspace in existing_keyspaces: drop = "DROP KEYSPACE {0}".format(keyspace) log.debug(drop) - session.execute(drop) + execute_until_pass(session, drop) create = "CREATE KEYSPACE {0} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 3}}".format(keyspace) log.debug(create) - session.execute(create) + execute_until_pass(session, create) create = "CREATE TABLE {0}.cf (k int PRIMARY KEY, i int)".format(keyspace) log.debug(create) - session.execute(create) + execute_until_pass(session, create) use = "USE {0}".format(keyspace) log.debug(use) - session.execute(use) + execute_until_pass(session, use) insert = "INSERT INTO cf (k, i) VALUES (0, 0)" log.debug(insert) ss = SimpleStatement(insert, consistency_level=ConsistencyLevel.QUORUM) - session.execute(ss) + execute_until_pass(session, ss) def test_for_schema_disagreements_different_keyspaces(self): """ @@ -86,28 +86,13 @@ def test_for_schema_disagreements_different_keyspaces(self): session = self.session for i in xrange(30): - try: - session.execute("CREATE KEYSPACE test_{0} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 1}}".format(i)) - session.execute("CREATE TABLE 
test_{0}.cf (key int PRIMARY KEY, value int)".format(i)) - - for j in xrange(100): - session.execute("INSERT INTO test_{0}.cf (key, value) VALUES ({1}, {1})".format(i, j)) - except OperationTimedOut: - ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) - del tb - finally: - while True: - try: - session.execute("DROP KEYSPACE test_{0}".format(i)) - break - except OperationTimedOut: - ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) - del tb - except ConfigurationException: - # We're good, the keyspace was never created due to OperationTimedOut - break + execute_until_pass(session, "CREATE KEYSPACE test_{0} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 1}}".format(i)) + execute_until_pass(session, "CREATE TABLE test_{0}.cf (key int PRIMARY KEY, value int)".format(i)) + + for j in xrange(100): + execute_until_pass(session, "INSERT INTO test_{0}.cf (key, value) VALUES ({1}, {1})".format(i, j)) + + execute_until_pass(session, "DROP KEYSPACE test_{0}".format(i)) def test_for_schema_disagreements_same_keyspace(self): """ @@ -119,24 +104,14 @@ def test_for_schema_disagreements_same_keyspace(self): for i in xrange(30): try: - session.execute("CREATE KEYSPACE test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}") - session.execute("CREATE TABLE test.cf (key int PRIMARY KEY, value int)") - - for j in xrange(100): - session.execute("INSERT INTO test.cf (key, value) VALUES ({0}, {0})".format(j)) - except OperationTimedOut: - ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) - del tb - finally: - while True: - try: - session.execute("DROP KEYSPACE test") - break - except OperationTimedOut: - ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, 
traceback.extract_tb(tb))) - del tb - except ConfigurationException: - # We're good, the keyspace was never created due to OperationTimedOut - break + execute_until_pass(session, "CREATE KEYSPACE test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}") + except AlreadyExists: + execute_until_pass(session, "DROP KEYSPACE test") + execute_until_pass(session, "CREATE KEYSPACE test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}") + + execute_until_pass(session, "CREATE TABLE test.cf (key int PRIMARY KEY, value int)") + + for j in xrange(100): + execute_until_pass(session, "INSERT INTO test.cf (key, value) VALUES ({0}, {0})".format(j)) + + execute_until_pass(session, "DROP KEYSPACE test") diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index e0e1e530d7..a3fb098cb5 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -131,23 +131,31 @@ def ring(node): def wait_for_up(cluster, node, wait=True): - while True: + tries = 0 + while tries < 100: host = cluster.metadata.get_host(IP_FORMAT % node) if host and host.is_up: log.debug("Done waiting for node %s to be up", node) return else: log.debug("Host is still marked down, waiting") + tries += 1 time.sleep(1) + raise RuntimeError("Host {0} is not up after 100 attempts".format(IP_FORMAT.format(node))) + def wait_for_down(cluster, node, wait=True): log.debug("Waiting for node %s to be down", node) - while True: + tries = 0 + while tries < 100: host = cluster.metadata.get_host(IP_FORMAT % node) if not host or not host.is_up: log.debug("Done waiting for node %s to be down", node) return else: log.debug("Host is still marked up, waiting") + tries += 1 time.sleep(1) + + raise RuntimeError("Host {0} is not down after 100 attempts".format(IP_FORMAT.format(node))) \ No newline at end of file diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index bf6bb1d476..045809a6f2 100644 
--- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -30,7 +30,7 @@ WhiteListRoundRobinPolicy) from cassandra.query import SimpleStatement, TraceUnavailable -from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, get_node +from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, get_node, execute_until_pass from tests.integration.util import assert_quiescent_pool_state @@ -72,14 +72,14 @@ def test_basic(self): cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() - result = session.execute( + result = execute_until_pass(session, """ CREATE KEYSPACE clustertests WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} """) self.assertEqual(None, result) - result = session.execute( + result = execute_until_pass(session, """ CREATE TABLE clustertests.cf0 ( a text, @@ -99,7 +99,7 @@ def test_basic(self): result = session.execute("SELECT * FROM clustertests.cf0") self.assertEqual([('a', 'b', 'c')], result) - session.execute("DROP KEYSPACE clustertests") + execute_until_pass(session, "DROP KEYSPACE clustertests") cluster.shutdown() @@ -227,7 +227,7 @@ def test_submit_schema_refresh(self): other_cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = other_cluster.connect() - session.execute( + execute_until_pass(session, """ CREATE KEYSPACE newkeyspace WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} @@ -238,7 +238,7 @@ def test_submit_schema_refresh(self): self.assertIn("newkeyspace", cluster.metadata.keyspaces) - session.execute("DROP KEYSPACE newkeyspace") + execute_until_pass(session, "DROP KEYSPACE newkeyspace") cluster.shutdown() other_cluster.shutdown() @@ -303,7 +303,7 @@ def test_refresh_schema_type(self): keyspace_name = 'test1rf' type_name = self._testMethodName - session.execute('CREATE TYPE IF NOT EXISTS %s.%s (one int, two text)' % (keyspace_name, type_name)) + 
execute_until_pass(session, 'CREATE TYPE IF NOT EXISTS %s.%s (one int, two text)' % (keyspace_name, type_name)) original_meta = cluster.metadata.keyspaces original_test1rf_meta = original_meta[keyspace_name] original_type_meta = original_test1rf_meta.user_types[type_name] diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index 8b2bd8e696..9a82338348 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -12,6 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +from itertools import cycle +import sys, logging, traceback + +from cassandra import InvalidRequest, ConsistencyLevel, ReadTimeout, WriteTimeout, OperationTimedOut, \ + ReadFailure, WriteFailure +from cassandra.cluster import Cluster, PagedResult +from cassandra.concurrent import execute_concurrent, execute_concurrent_with_args +from cassandra.policies import HostDistance +from cassandra.query import tuple_factory, SimpleStatement + from tests.integration import use_singledc, PROTOCOL_VERSION try: @@ -19,14 +29,7 @@ except ImportError: import unittest # noqa -from itertools import cycle - -from cassandra import InvalidRequest, ConsistencyLevel -from cassandra.cluster import Cluster, PagedResult -from cassandra.concurrent import (execute_concurrent, - execute_concurrent_with_args) -from cassandra.policies import HostDistance -from cassandra.query import tuple_factory, SimpleStatement +log = logging.getLogger(__name__) def setup_module(): @@ -47,6 +50,31 @@ def setUpClass(cls): def tearDownClass(cls): cls.cluster.shutdown() + def execute_concurrent_helper(self, session, query): + count = 0 + while count < 100: + try: + return execute_concurrent(session, query) + except (ReadTimeout, WriteTimeout, OperationTimedOut, ReadFailure, WriteFailure): + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: 
{2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + count += 1 + + raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query)) + + def execute_concurrent_args_helper(self, session, query, params): + count = 0 + while count < 100: + try: + return execute_concurrent_with_args(session, query, params) + except (ReadTimeout, WriteTimeout, OperationTimedOut, ReadFailure, WriteFailure): + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + + raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query)) + def test_execute_concurrent(self): for num_statements in (0, 1, 2, 7, 10, 99, 100, 101, 199, 200, 201): # write @@ -56,7 +84,7 @@ def test_execute_concurrent(self): statements = cycle((statement, )) parameters = [(i, i) for i in range(num_statements)] - results = execute_concurrent(self.session, list(zip(statements, parameters))) + results = self.execute_concurrent_helper(self.session, list(zip(statements, parameters))) self.assertEqual(num_statements, len(results)) self.assertEqual([(True, None)] * num_statements, results) @@ -67,7 +95,7 @@ def test_execute_concurrent(self): statements = cycle((statement, )) parameters = [(i, ) for i in range(num_statements)] - results = execute_concurrent(self.session, list(zip(statements, parameters))) + results = self.execute_concurrent_helper(self.session, list(zip(statements, parameters))) self.assertEqual(num_statements, len(results)) self.assertEqual([(True, [(i,)]) for i in range(num_statements)], results) @@ -78,7 +106,7 @@ def test_execute_concurrent_with_args(self): consistency_level=ConsistencyLevel.QUORUM) parameters = [(i, i) for i in range(num_statements)] - results = execute_concurrent_with_args(self.session, statement, parameters) + results = self.execute_concurrent_args_helper(self.session, statement, parameters) self.assertEqual(num_statements, len(results)) 
self.assertEqual([(True, None)] * num_statements, results) @@ -88,7 +116,7 @@ def test_execute_concurrent_with_args(self): consistency_level=ConsistencyLevel.QUORUM) parameters = [(i, ) for i in range(num_statements)] - results = execute_concurrent_with_args(self.session, statement, parameters) + results = self.execute_concurrent_args_helper(self.session, statement, parameters) self.assertEqual(num_statements, len(results)) self.assertEqual([(True, [(i,)]) for i in range(num_statements)], results) @@ -104,7 +132,7 @@ def test_execute_concurrent_paged_result(self): consistency_level=ConsistencyLevel.QUORUM) parameters = [(i, i) for i in range(num_statements)] - results = execute_concurrent_with_args(self.session, statement, parameters) + results = self.execute_concurrent_args_helper(self.session, statement, parameters) self.assertEqual(num_statements, len(results)) self.assertEqual([(True, None)] * num_statements, results) @@ -115,7 +143,7 @@ def test_execute_concurrent_paged_result(self): fetch_size=int(num_statements / 2)) parameters = [(i, ) for i in range(num_statements)] - results = execute_concurrent_with_args(self.session, statement, [(num_statements,)]) + results = self.execute_concurrent_args_helper(self.session, statement, [(num_statements,)]) self.assertEqual(1, len(results)) self.assertTrue(results[0][0]) result = results[0][1] diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 2bb88b50cf..ae1a37a09c 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -17,15 +17,10 @@ except ImportError: import unittest # noqa -import difflib +import difflib, six, sys from mock import Mock -import logging -import six -import sys -import traceback -from cassandra import AlreadyExists, OperationTimedOut, SignatureDescriptor, UserFunctionDescriptor, \ - UserAggregateDescriptor +from cassandra import AlreadyExists, SignatureDescriptor, UserFunctionDescriptor, 
UserAggregateDescriptor from cassandra.cluster import Cluster from cassandra.cqltypes import DoubleType, Int32Type, ListType, UTF8Type, MapType @@ -35,10 +30,7 @@ from cassandra.policies import SimpleConvictionPolicy from cassandra.pool import Host -from tests.integration import (get_cluster, use_singledc, PROTOCOL_VERSION, - get_server_versions) - -log = logging.getLogger(__name__) +from tests.integration import get_cluster, use_singledc, PROTOCOL_VERSION, get_server_versions, execute_until_pass def setup_module(): @@ -58,18 +50,12 @@ def setUp(self): self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) self.session = self.cluster.connect() - self.session.execute("CREATE KEYSPACE schemametadatatest WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}") + execute_until_pass(self.session, + "CREATE KEYSPACE schemametadatatest WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}") def tearDown(self): - while True: - try: - self.session.execute("DROP KEYSPACE schemametadatatest") - self.cluster.shutdown() - break - except OperationTimedOut: - ex_type, ex, tb = sys.exc_info() - log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) - del tb + execute_until_pass(self.session, "DROP KEYSPACE schemametadatatest") + self.cluster.shutdown() def make_create_statement(self, partition_cols, clustering_cols=None, other_cols=None, compact=False): clustering_cols = clustering_cols or [] @@ -107,13 +93,13 @@ def make_create_statement(self, partition_cols, clustering_cols=None, other_cols def check_create_statement(self, tablemeta, original): recreate = tablemeta.as_cql_query(formatted=False) self.assertEqual(original, recreate[:len(original)]) - self.session.execute("DROP TABLE %s.%s" % (self.ksname, self.cfname)) - self.session.execute(recreate) + execute_until_pass(self.session, "DROP TABLE {0}.{1}".format(self.ksname, self.cfname)) + execute_until_pass(self.session, recreate) # create the table 
again, but with formatting enabled - self.session.execute("DROP TABLE %s.%s" % (self.ksname, self.cfname)) + execute_until_pass(self.session, "DROP TABLE {0}.{1}".format(self.ksname, self.cfname)) recreate = tablemeta.as_cql_query(formatted=True) - self.session.execute(recreate) + execute_until_pass(self.session, recreate) def get_table_metadata(self): self.cluster.refresh_table_metadata(self.ksname, self.cfname) diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 7b19404907..6731e9073e 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -23,7 +23,7 @@ from cassandra import ConsistencyLevel, WriteTimeout, Unavailable, ReadTimeout from cassandra.cluster import Cluster, NoHostAvailable -from tests.integration import get_cluster, get_node, use_singledc, PROTOCOL_VERSION +from tests.integration import get_cluster, get_node, use_singledc, PROTOCOL_VERSION, execute_until_pass def setup_module(): @@ -76,7 +76,7 @@ def test_write_timeout(self): # Assert read query = SimpleStatement("SELECT * FROM test WHERE k=1", consistency_level=ConsistencyLevel.ALL) - results = session.execute(query) + results = execute_until_pass(session, query) self.assertEqual(1, len(results)) # Pause node so it shows as unreachable to coordinator @@ -109,7 +109,7 @@ def test_read_timeout(self): # Assert read query = SimpleStatement("SELECT * FROM test WHERE k=1", consistency_level=ConsistencyLevel.ALL) - results = session.execute(query) + results = execute_until_pass(session, query) self.assertEqual(1, len(results)) # Pause node so it shows as unreachable to coordinator @@ -142,11 +142,11 @@ def test_unavailable(self): # Assert read query = SimpleStatement("SELECT * FROM test WHERE k=1", consistency_level=ConsistencyLevel.ALL) - results = session.execute(query) + results = execute_until_pass(session, query) self.assertEqual(1, len(results)) # Stop node gracefully - 
get_node(1).stop(wait=True, gently=True) + get_node(1).stop(wait=True, wait_other_notice=True) try: # Test write diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index dc98505b87..80a0d8e24f 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -512,8 +512,14 @@ def test_no_connection_refused_on_timeout(self): if type(result).__name__ == "WriteTimeout": received_timeout = True continue + if type(result).__name__ == "WriteFailure": + received_timeout = True + continue if type(result).__name__ == "ReadTimeout": continue + if type(result).__name__ == "ReadFailure": + continue + self.fail("Unexpected exception %s: %s" % (type(result).__name__, result.message)) # Make sure test passed diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index c6c8114b57..3dd87e82b9 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -17,9 +17,6 @@ except ImportError: import unittest # noqa -import logging -log = logging.getLogger(__name__) - from datetime import datetime import six @@ -29,7 +26,7 @@ from cassandra.query import dict_factory, ordered_dict_factory from cassandra.util import sortedset -from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION +from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION, execute_until_pass from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, \ get_sample, get_collection_sample @@ -51,9 +48,9 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - cls.session.execute("DROP KEYSPACE typetests") + execute_until_pass(cls.session, "DROP KEYSPACE typetests") cls.cluster.shutdown() - + def test_can_insert_blob_type_as_string(self): """ Tests that byte strings in Python maps to blob type in Cassandra diff --git a/tests/integration/standard/test_udts.py 
b/tests/integration/standard/test_udts.py index 10b9dd390c..7d9eb99173 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -17,9 +17,6 @@ except ImportError: import unittest # noqa -import logging -log = logging.getLogger(__name__) - from collections import namedtuple from functools import partial @@ -28,7 +25,7 @@ from cassandra.query import dict_factory from cassandra.util import OrderedMap -from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION +from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION, execute_until_pass from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, \ get_sample, get_collection_sample @@ -51,13 +48,14 @@ def setUp(self): self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) self.session = self.cluster.connect() - self.session.execute("CREATE KEYSPACE udttests WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}") + execute_until_pass(self.session, + "CREATE KEYSPACE udttests WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}") self.cluster.shutdown() def tearDown(self): self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) self.session = self.cluster.connect() - self.session.execute("DROP KEYSPACE udttests") + execute_until_pass(self.session, "DROP KEYSPACE udttests") self.cluster.shutdown() def test_can_insert_unprepared_registered_udts(self): diff --git a/tests/integration/cqlengine/test_load.py b/tests/stress_tests/test_load.py similarity index 52% rename from tests/integration/cqlengine/test_load.py rename to tests/stress_tests/test_load.py index 4c62a525f3..6283d2e413 100644 --- a/tests/integration/cqlengine/test_load.py +++ b/tests/stress_tests/test_load.py @@ -17,35 +17,37 @@ import unittest # noqa import gc -import os -import resource from cassandra.cqlengine import columns from cassandra.cqlengine.models import Model from 
cassandra.cqlengine.management import sync_table +from tests.integration.cqlengine.base import BaseCassEngTestCase -class LoadTest(Model): - k = columns.Integer(primary_key=True) - v = columns.Integer() +class LoadTests(BaseCassEngTestCase): -@unittest.skipUnless("LOADTEST" in os.environ, "LOADTEST not on") -def test_lots_of_queries(): - sync_table(LoadTest) - import objgraph - gc.collect() - objgraph.show_most_common_types() + def test_lots_of_queries(self): + import resource + import objgraph - print("Starting...") + class LoadTest(Model): + k = columns.Integer(primary_key=True) + v = columns.Integer() - for i in range(1000000): - if i % 25000 == 0: - # print memory statistic - print("Memory usage: %s" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)) + sync_table(LoadTest) + gc.collect() + objgraph.show_most_common_types() - LoadTest.create(k=i, v=i) + print("Starting...") - objgraph.show_most_common_types() + for i in range(1000000): + if i % 25000 == 0: + # print memory statistic + print("Memory usage: %s" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)) - raise Exception("you shouldn't be here") + LoadTest.create(k=i, v=i) + + objgraph.show_most_common_types() + + raise Exception("you shouldn't be here") diff --git a/tests/integration/long/test_multi_inserts.py b/tests/stress_tests/test_multi_inserts.py old mode 100755 new mode 100644 similarity index 76% rename from tests/integration/long/test_multi_inserts.py rename to tests/stress_tests/test_multi_inserts.py index 10422c974b..b23a29ddca --- a/tests/integration/long/test_multi_inserts.py +++ b/tests/stress_tests/test_multi_inserts.py @@ -1,3 +1,16 @@ +# Copyright 2013-2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. try: import unittest2 as unittest @@ -14,11 +27,10 @@ def setup_module(): class StressInsertsTests(unittest.TestCase): - - ''' + """ Test case for PYTHON-124: Repeated inserts may exhaust all connections causing NoConnectionsAvailable, in_flight never decreased - ''' + """ def setUp(self): """ @@ -61,7 +73,7 @@ def test_in_flight_is_one(self): for pool in self.session._pools.values(): if leaking_connections: break - for conn in pool._connections: + for conn in pool.get_connections(): if conn.in_flight > 1: print self.session.get_pool_state() leaking_connections = True diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 17e42a0221..988f648869 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -44,7 +44,7 @@ class AsyncoreConnectionTest(unittest.TestCase): @classmethod def setUpClass(cls): if is_monkey_patched(): - raise unittest.SkipTest("monkey-patching detected") + return AsyncoreConnection.initialize_reactor() cls.socket_patcher = patch('socket.socket', spec=socket.socket) cls.mock_socket = cls.socket_patcher.start() @@ -56,8 +56,14 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): + if is_monkey_patched(): + return cls.socket_patcher.stop() + def setUp(self): + if is_monkey_patched(): + raise unittest.SkipTest("Can't test asyncore with monkey patching") + def make_connection(self): c = AsyncoreConnection('1.2.3.4', cql_version='3.0.1') c.socket = Mock() diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index 
ddd2dc0417..39d66b6813 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -49,7 +49,7 @@ class LibevConnectionTest(unittest.TestCase): def setUp(self): if 'gevent.monkey' in sys.modules: - raise unittest.SkipTest("gevent monkey-patching detected") + raise unittest.SkipTest("Can't test libev with monkey patching") if LibevConnection is None: raise unittest.SkipTest('libev does not appear to be installed correctly') LibevConnection.initialize_reactor() diff --git a/tests/unit/test_time_util.py b/tests/unit/test_time_util.py index 0a3209a5f9..1c3dc1b4d1 100644 --- a/tests/unit/test_time_util.py +++ b/tests/unit/test_time_util.py @@ -42,11 +42,11 @@ def test_times_from_uuid1(self): u = uuid.uuid1(node, 0) t = util.unix_time_from_uuid1(u) - self.assertAlmostEqual(now, t, 3) + self.assertAlmostEqual(now, t, 2) dt = util.datetime_from_uuid1(u) t = calendar.timegm(dt.timetuple()) + dt.microsecond / 1e6 - self.assertAlmostEqual(now, t, 3) + self.assertAlmostEqual(now, t, 2) def test_uuid_from_time(self): t = time.time() From b4a13d4bf0f34702fdd721f1a4bb27e4079e172c Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Fri, 12 Jun 2015 17:24:55 -0700 Subject: [PATCH 1518/3726] Cqlengine fixes to skip tests properly --- .../cqlengine/columns/test_validation.py | 9 ++++- .../cqlengine/management/test_management.py | 40 ++++++++++--------- .../cqlengine/model/test_model_io.py | 9 ++++- .../integration/cqlengine/model/test_udts.py | 3 +- .../cqlengine/query/test_queryset.py | 28 +++++++------ 5 files changed, 52 insertions(+), 37 deletions(-) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index b645755574..ce8ab987c5 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -153,14 +153,19 @@ class DateTest(Model): @classmethod def setUpClass(cls): if PROTOCOL_VERSION < 4: - raise 
unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) - + return sync_table(cls.DateTest) @classmethod def tearDownClass(cls): + if PROTOCOL_VERSION < 4: + return drop_table(cls.DateTest) + def setUp(self): + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) + def test_date_io(self): today = date.today() self.DateTest.objects.create(test_id=0, created_at=today) diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 1601f318c8..e20680d036 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -311,30 +311,32 @@ def test_failure(self): with self.assertRaises(CQLEngineException): sync_table(self.FakeModel) +class StaticColumnTests(BaseCassEngTestCase): + def test_static_columns(self): + if PROTOCOL_VERSION < 2: + raise unittest.SkipTest("Native protocol 2+ required, currently using: {0}".format(PROTOCOL_VERSION)) -@unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") -def test_static_columns(): - class StaticModel(Model): - id = columns.Integer(primary_key=True) - c = columns.Integer(primary_key=True) - name = columns.Text(static=True) + class StaticModel(Model): + id = columns.Integer(primary_key=True) + c = columns.Integer(primary_key=True) + name = columns.Text(static=True) - drop_table(StaticModel) + drop_table(StaticModel) - session = get_session() + session = get_session() - with mock.patch.object(session, "execute", wraps=session.execute) as m: - sync_table(StaticModel) - - assert m.call_count > 0 - statement = m.call_args[0][0].query_string - assert '"name" text static' in statement, statement + with mock.patch.object(session, "execute", wraps=session.execute) as m: + sync_table(StaticModel) - # if we 
sync again, we should not apply an alter w/ a static - sync_table(StaticModel) + assert m.call_count > 0 + statement = m.call_args[0][0].query_string + assert '"name" text static' in statement, statement - with mock.patch.object(session, "execute", wraps=session.execute) as m2: + # if we sync again, we should not apply an alter w/ a static sync_table(StaticModel) - assert len(m2.call_args_list) == 1 - assert "ALTER" not in m2.call_args[0][0].query_string + with mock.patch.object(session, "execute", wraps=session.execute) as m2: + sync_table(StaticModel) + + assert len(m2.call_args_list) == 1 + assert "ALTER" not in m2.call_args[0][0].query_string diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 35d7b81fee..3ba2aa9711 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -437,7 +437,7 @@ class TestQuerying(BaseCassEngTestCase): @classmethod def setUpClass(cls): if PROTOCOL_VERSION < 4: - raise unittest.SkipTest("Date query tests require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) + return super(TestQuerying, cls).setUpClass() drop_table(TestQueryModel) @@ -445,9 +445,16 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): + if PROTOCOL_VERSION < 4: + return + super(TestQuerying, cls).tearDownClass() drop_table(TestQueryModel) + def setUp(self): + if PROTOCOL_VERSION < 4: + raise unittest.SkipTest("Date query tests require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION)) + def test_query_with_date(self): uid = uuid4() day = date(2013, 11, 26) diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 86cc4596d8..bf2f37020c 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -32,8 +32,7 @@ class UserDefinedTypeTests(BaseCassEngTestCase): - @classmethod - def 
setUpClass(self): + def setUp(self): if PROTOCOL_VERSION < 3: raise unittest.SkipTest("UDTs require native protocol 3+, currently using: {0}".format(PROTOCOL_VERSION)) diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index cbaea21f2d..5c2b76f985 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -689,23 +689,25 @@ def test_objects_property_returns_fresh_queryset(self): len(TestModel.objects) # evaluate queryset assert TestModel.objects._result_cache is None +class PageQueryTests(BaseCassEngTestCase): + def test_paged_result_handling(self): + if PROTOCOL_VERSION < 2: + raise unittest.SkipTest("Paging requires native protocol 2+, currently using: {0}".format(PROTOCOL_VERSION)) -@unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") -def test_paged_result_handling(): - # addresses #225 - class PagingTest(Model): - id = columns.Integer(primary_key=True) - val = columns.Integer() - sync_table(PagingTest) + # addresses #225 + class PagingTest(Model): + id = columns.Integer(primary_key=True) + val = columns.Integer() + sync_table(PagingTest) - PagingTest.create(id=1, val=1) - PagingTest.create(id=2, val=2) + PagingTest.create(id=1, val=1) + PagingTest.create(id=2, val=2) - session = get_session() - with mock.patch.object(session, 'default_fetch_size', 1): - results = PagingTest.objects()[:] + session = get_session() + with mock.patch.object(session, 'default_fetch_size', 1): + results = PagingTest.objects()[:] - assert len(results) == 2 + assert len(results) == 2 class ModelQuerySetTimeoutTestCase(BaseQuerySetUsage): From b3027786239ec1c397aa7b4e06a5b157f8f07f44 Mon Sep 17 00:00:00 2001 From: Mike Okner Date: Fri, 12 Jun 2015 14:30:47 -0500 Subject: [PATCH 1519/3726] Set models.DEFAULT_KEYSPACE when calling set_session() Sets the default keyspace for cqlengine models when passing in an existing 
session to cqlengine.connection.set_session() if the session's keyspace is set and models.DEFAULT_KEYSPACE isn't. Without this, the user will see an error when trying to perform Cassandra interactions with a Model instance like "Keyspace none does not exist" unless they explicitly set models.DEFAULT_KEYSPACE themselves ahead of time or have a __keyspace__ attribute set on the model. Also no longer raising an exception in cqlengine.connection.setup() if default_keyspace isn't set since individual models might still have __keyspace__ set. --- cassandra/cqlengine/connection.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index ead241c6fd..c6540d4986 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -82,6 +82,11 @@ def set_session(s): session = s cluster = s.cluster + # Set default keyspace from given session's keyspace if not already set + from cassandra.cqlengine import models + if not models.DEFAULT_KEYSPACE and session.keyspace: + models.DEFAULT_KEYSPACE = session.keyspace + _register_known_types(cluster) log.debug("cqlengine connection initialized with %s", s) @@ -109,9 +114,6 @@ def setup( if 'username' in kwargs or 'password' in kwargs: raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider") - if not default_keyspace: - raise UndefinedKeyspaceException() - from cassandra.cqlengine import models models.DEFAULT_KEYSPACE = default_keyspace From be6337ee82fa30a53cd873e88d07838a8a3d0932 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 15 Jun 2015 15:32:53 -0500 Subject: [PATCH 1520/3726] Do not try to access an empty token map. Fixes an issue where multiple queries, with routing key set, to a keyspace without tokens_to_hosts, would raise a key error. 
--- cassandra/metadata.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 843d96f38e..857542c472 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -538,7 +538,6 @@ def add_or_return_host(self, host): self._hosts[host.address] = host return host, True - def remove_host(self, host): with self._hosts_lock: return bool(self._hosts.pop(host.address, False)) @@ -894,6 +893,7 @@ def _drop_table_metadata(self, table_name): for index_name in table_meta.indexes: self.indexes.pop(index_name, None) + class UserType(object): """ A user defined type, as created by ``CREATE TYPE`` statements. @@ -1607,17 +1607,17 @@ def get_replicas(self, keyspace, token): if tokens_to_hosts is None: self.rebuild_keyspace(keyspace, build_if_absent=True) tokens_to_hosts = self.tokens_to_hosts_by_ks.get(keyspace, None) - if not tokens_to_hosts: - return [] - - # token range ownership is exclusive on the LHS (the start token), so - # we use bisect_right, which, in the case of a tie/exact match, - # picks an insertion point to the right of the existing match - point = bisect_right(self.ring, token) - if point == len(self.ring): - return tokens_to_hosts[self.ring[0]] - else: - return tokens_to_hosts[self.ring[point]] + + if tokens_to_hosts: + # token range ownership is exclusive on the LHS (the start token), so + # we use bisect_right, which, in the case of a tie/exact match, + # picks an insertion point to the right of the existing match + point = bisect_right(self.ring, token) + if point == len(self.ring): + return tokens_to_hosts[self.ring[0]] + else: + return tokens_to_hosts[self.ring[point]] + return [] class Token(object): From beb14443d4688cc2c126afe91c193d127f5d1eed Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 15 Jun 2015 18:19:58 -0500 Subject: [PATCH 1521/3726] Adding integration tests for protocol negotiation --- tests/integration/standard/test_cluster.py | 38 
+++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index bf6bb1d476..cbd8cd0996 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -30,7 +30,9 @@ WhiteListRoundRobinPolicy) from cassandra.query import SimpleStatement, TraceUnavailable -from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, get_node + +from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, get_node, CASSANDRA_VERSION + from tests.integration.util import assert_quiescent_pool_state @@ -103,6 +105,38 @@ def test_basic(self): cluster.shutdown() + def test_protocol_negotiation(self): + """ + Test for protocol negotiation + + test_protocol_negotiation tests that the driver will select the correct protocol version to match + the correct cassandra version. Please note that 2.1.5 has a + bug https://issues.apache.org/jira/browse/CASSANDRA-9451 that will cause this test to fail + that will cause this to not pass. 
It was rectified in 2.1.6 + + @since 2.6.0 + @jira_ticket PYTHON-240 + @expected_result the correct protocol version should be selected + + @test_category connection + """ + + cluster = Cluster() + session = cluster.connect() + default_protocol_version = session._protocol_version + + # Make sure the correct protocol was selected by default + if CASSANDRA_VERSION >= '2.2': + self.assertEqual(default_protocol_version, 4) + elif CASSANDRA_VERSION >= '2.1': + self.assertEqual(default_protocol_version, 3) + elif CASSANDRA_VERSION >= '2.0': + self.assertEqual(default_protocol_version, 2) + else: + self.assertEqual(default_protocol_version, 1) + + cluster.shutdown() + def test_connect_on_keyspace(self): """ Ensure clusters that connect on a keyspace, do @@ -577,3 +611,5 @@ def test_pool_management(self): assert_quiescent_pool_state(self, cluster) cluster.shutdown() + + From 2f3e0424698c1e8593ba3ef44169d68bd16e6038 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Tue, 16 Jun 2015 15:48:59 -0500 Subject: [PATCH 1522/3726] Adding basic integration test for PYTHON-322 --- tests/integration/long/ssl/cassandra.crt | Bin 0 -> 626 bytes tests/integration/long/ssl/driver_ca_cert.pem | 16 +++ tests/integration/long/ssl/keystore.jks | Bin 0 -> 1398 bytes tests/integration/long/test_ssl.py | 111 ++++++++++++++++++ 4 files changed, 127 insertions(+) create mode 100644 tests/integration/long/ssl/cassandra.crt create mode 100644 tests/integration/long/ssl/driver_ca_cert.pem create mode 100644 tests/integration/long/ssl/keystore.jks create mode 100644 tests/integration/long/test_ssl.py diff --git a/tests/integration/long/ssl/cassandra.crt b/tests/integration/long/ssl/cassandra.crt new file mode 100644 index 0000000000000000000000000000000000000000..432e58540babe586367427cfa5f5d7fdf8c2f170 GIT binary patch literal 626 zcmXqLV#+gUV!Xb9nTe5!iN*eN$qNHsHcqWJkGAi;jEvl@3le43th=CADhFzFDI5DpzQNcMUu_)0{(10H#$|cO~9<7#bKtxpZ|)<9y_h zU}R-rZtP_+XzXNaY-Cs{VWZtPMeySN-bE{x8gVmDm20?RUb^5+N8mfNjeff3Yacwh 
zXQ6mbaH++n%nWrV=kNEkRPHwDh3+ZZ&ZvK5z5801MT;Dic4RQaWUJJq`6GYj0}v6 z6%FJKWPu?m%f}+dBJwpUGf0BXN5K8Dm?LN0jjv5YT=S590E|m!pwq>d@2Lz>UN|HE z#Qeq4GaD~Wy>;u?&-|^f&VBg{Zr+jFP@Zw^^B>m9Z#HxuK5v&irBl(u!2G|)-Dy_I z#YSDrCnRLqMhjeu+~-g_wKd!K<}azMyxbeE?JfOmq*$uQm0&QzI4M4|_nSY{7K4lX aUkLAczwrJ9tC$`&f zqOZ(8Bi5~0mwmOh=efU@f6#-6#$o>ooBF>6J@?b#=HJVHa*I}I-h+CD=*T&S6SP$`!+ghorIcZ*(RTvLX zEAbYzbACS2RB3VHuaXCI)e;K7+<#vs86)s)5wD4|&c!puJEk)ow2b+&wfCO&%DI|p z=VwmSUfEdn^=1Zx@Bb+aUf7;XJJ)tVDDrdJmiCL>6V7FvHr~en^?mz+eKxN=l~pTk z3+~PgTebGmI>|;~=APU`>nkh(ieV*-k+hBJ?Vb* zpSN35+Ff2x5VybGHA$_wygfB$_BTQQP`!ncZ)Ix^)jG00ka>Qc{l)!%{f%owui89o zYm#B!x$)ZaRZRvrO?}0~=AG;L*mF&BwN2^nNMGf4_q);37iT@LJNSHhwr2)^MfZ|w zg|$jk#l3n;%Vkb~+vRQ5@TTEqfqBQm_-hdtFZ2a*2p?AnXVSm1=;5TsrwfeSU(AS; zE#9uM+FIOw@yuWKQ+LQ;wPRygpSQC8oz0OiC$2V{+-435sF#k)JRl2B)T|MDrUsV4 zgj@tn$aw}$jMo=1GchtTvDkkud11iI#;Mij(e|B}k&&B~!63_!+klgeIh2J>m?^{+ z#^DfVa&|NnF%SaDunThsC+3wTDmdpP79|=A8t{Wexr8}f5=#<;OA;$!y12ncNEnEL zd5d|tVK=aiS?p=RN4`&ZpvGIzQB@wu~>KhoXJcj>0M&v)J9VzwzsbD5YK85kEU z8ps>S0z*=kk420{!u<{|q47?;dIr;9J&QyHMVa7O%z z`HQ1xHeQ-~>(;NI`CDC``|=muyd$-tJmcEuKdh7AZ0J0E-Y$7cr=o>{`G1YO)2xz< zjk=akNXW8{7Pu6-&!Kc`YqsyrUs6|jxi?(fTl(2Zu~d&M!C-=MQha3ZH-Dxr1{e3g u5Z?2C;r$6#F+FO=s}C-`#Hg_2!GX3nhWw`v&yrZjliX3p^FL_4p(Oxq{5b#s literal 0 HcmV?d00001 diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py new file mode 100644 index 0000000000..69e60db76f --- /dev/null +++ b/tests/integration/long/test_ssl.py @@ -0,0 +1,111 @@ +# Copyright 2013-2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +try: + import unittest2 as unittest +except ImportError: + import unittest + +import os +import ssl +from cassandra.cluster import Cluster +from cassandra import ConsistencyLevel +from cassandra.query import SimpleStatement +from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster + + +DEFAULT_PASSWORD = "cassandra" + +DEFAULT_SERVER_KEYSTORE_PATH = "tests/integration/long/ssl/keystore.jks" + +DEFAULT_CLIENT_CA_CERTS = 'tests/integration/long/ssl/driver_ca_cert.pem' + + +def setup_module(): + + """ + We need some custom setup for this module. This will start the ccm cluster with basic + ssl connectivity. No client authentication is performed at this time. + """ + + use_singledc(start=False) + ccm_cluster = get_cluster() + ccm_cluster.stop() + + # Fetch the absolute path to the keystore for ccm. + abs_path_server_keystore_path = os.path.abspath(DEFAULT_SERVER_KEYSTORE_PATH) + + # Configure ccm to use ssl. + config_options = {'client_encryption_options': {'enabled': True, + 'keystore': abs_path_server_keystore_path, + 'keystore_password': DEFAULT_PASSWORD}} + ccm_cluster.set_configuration_options(config_options) + ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) + + +def teardown_module(): + + """ + The rest of the tests don't need ssl enabled + reset the config options so as to not interfere with other tests. + """ + + ccm_cluster = get_cluster() + config_options = {} + ccm_cluster.set_configuration_options(config_options) + if ccm_cluster is not None: + ccm_cluster.stop() + + +class SSLConnectionTests(unittest.TestCase): + + def test_ssl_connection(self): + """ + Test to validate that we are able to connect to a cluster using ssl. + + test_ssl_connection Performs a simple sanity check to ensure that we can connect to a cluster with ssl. 
+ + + @since 2.6.0 + @jira_ticket PYTHON-332 + @expected_result we can connect and preform some basic operations + + @test_category connection:ssl + """ + + # Setup temporary keyspace. + abs_path_ca_cert_path = os.path.abspath(DEFAULT_CLIENT_CA_CERTS) + + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': abs_path_ca_cert_path, + 'ssl_version': ssl.PROTOCOL_TLSv1}) + self.session = self.cluster.connect() + + # attempt a few simple commands. + insert_keyspace = """CREATE KEYSPACE ssltest + WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'} + """ + statement = SimpleStatement(insert_keyspace) + statement.consistency_level = 3 + self.session.execute(statement) + + drop_keyspace = "DROP KEYSPACE ssltest" + + statement = SimpleStatement(drop_keyspace) + statement.consistency_level = ConsistencyLevel.ANY + self.session.execute(statement) + + + + From 0b6e25077b80747161ad759675b0a51944b20a47 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 17 Jun 2015 09:38:55 -0500 Subject: [PATCH 1523/3726] Updating protocol negotiation for better validation --- tests/integration/standard/test_cluster.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index cbd8cd0996..ee738e47a9 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -30,6 +30,7 @@ WhiteListRoundRobinPolicy) from cassandra.query import SimpleStatement, TraceUnavailable +from cassandra.protocol import MAX_SUPPORTED_VERSION from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, get_node, CASSANDRA_VERSION @@ -122,18 +123,23 @@ def test_protocol_negotiation(self): """ cluster = Cluster() + self.assertEqual(cluster.protocol_version, MAX_SUPPORTED_VERSION) session = cluster.connect() - default_protocol_version = session._protocol_version - + 
updated_protocol_version = session._protocol_version + updated_cluster_version = cluster.protocol_version # Make sure the correct protocol was selected by default if CASSANDRA_VERSION >= '2.2': - self.assertEqual(default_protocol_version, 4) + self.assertEqual(updated_protocol_version, 4) + self.assertEqual(updated_cluster_version, 4) elif CASSANDRA_VERSION >= '2.1': - self.assertEqual(default_protocol_version, 3) + self.assertEqual(updated_protocol_version, 3) + self.assertEqual(updated_cluster_version, 3) elif CASSANDRA_VERSION >= '2.0': - self.assertEqual(default_protocol_version, 2) + self.assertEqual(updated_protocol_version, 2) + self.assertEqual(updated_cluster_version, 2) else: - self.assertEqual(default_protocol_version, 1) + self.assertEqual(updated_protocol_version, 1) + self.assertEqual(updated_cluster_version, 1) cluster.shutdown() From 3315da86e68ad47638b253a8ddbf1da499ae9fe5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 17 Jun 2015 09:46:52 -0500 Subject: [PATCH 1524/3726] Remove superfluous schema retry in CC connect Now that we're not waiting for agreement in the first place, this retry should not be needed. 
--- cassandra/cluster.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index b7d5061693..284314bee7 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2121,9 +2121,6 @@ def _try_connect(self, host): self._refresh_node_list_and_token_map(connection, preloaded_results=shared_results) self._refresh_schema(connection, preloaded_results=shared_results, schema_agreement_wait=-1) - if not self._cluster.metadata.keyspaces: - log.warning("[control connection] No schema built on connect; retrying without wait for schema agreement") - self._refresh_schema(connection, preloaded_results=shared_results, schema_agreement_wait=0) except Exception: connection.close() raise From 37cc21fd063ee9258db322ed0ec39edbc1b0633f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 17 Jun 2015 10:47:22 -0500 Subject: [PATCH 1525/3726] Make benchmark setup/teardown use minimal protocol version --- benchmarks/base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benchmarks/base.py b/benchmarks/base.py index 09e76a507d..3d02a150c2 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -61,7 +61,7 @@ def setup(hosts): log.info("Using 'cassandra' package from %s", cassandra.__path__) - cluster = Cluster(hosts) + cluster = Cluster(hosts, protocol_version=1) cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) try: session = cluster.connect() @@ -89,7 +89,7 @@ def setup(hosts): def teardown(hosts): - cluster = Cluster(hosts) + cluster = Cluster(hosts, protocol_version=1) cluster.set_core_connections_per_host(HostDistance.LOCAL, 1) session = cluster.connect() session.execute("DROP KEYSPACE " + KEYSPACE) From 675e287a427c699c24b8ea24050519a7c7d858a6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 17 Jun 2015 13:02:32 -0500 Subject: [PATCH 1526/3726] Explicitly import gevent.sll in reactor impl Fixes an issue where ssl is not available at the package level, depending on what has executed thus 
far. --- cassandra/io/geventreactor.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index a49fb737f6..5d8553cec6 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. import gevent -from gevent import select, socket from gevent.event import Event from gevent.queue import Queue +from gevent import select, socket +import gevent.ssl from collections import defaultdict from functools import partial From df41a349f5a0883683be5ce1f543c5243caeb4b8 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Wed, 17 Jun 2015 18:51:15 -0700 Subject: [PATCH 1527/3726] retry cluster connection in SSL test --- tests/integration/long/test_ssl.py | 56 +++++++++++++++--------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index 69e60db76f..d7cfc5a841 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -12,29 +12,25 @@ # See the License for the specific language governing permissions and # limitations under the License. - try: import unittest2 as unittest except ImportError: import unittest -import os -import ssl +import os, sys, traceback, logging, ssl from cassandra.cluster import Cluster from cassandra import ConsistencyLevel from cassandra.query import SimpleStatement -from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster +from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster, remove_cluster +log = logging.getLogger(__name__) DEFAULT_PASSWORD = "cassandra" - DEFAULT_SERVER_KEYSTORE_PATH = "tests/integration/long/ssl/keystore.jks" - DEFAULT_CLIENT_CA_CERTS = 'tests/integration/long/ssl/driver_ca_cert.pem' def setup_module(): - """ We need some custom setup for this module. 
This will start the ccm cluster with basic ssl connectivity. No client authentication is performed at this time. @@ -56,31 +52,27 @@ def setup_module(): def teardown_module(): - """ - The rest of the tests don't need ssl enabled - reset the config options so as to not interfere with other tests. + The rest of the tests don't need ssl enabled, remove the cluster so as to not interfere with other tests. """ - ccm_cluster = get_cluster() - config_options = {} - ccm_cluster.set_configuration_options(config_options) - if ccm_cluster is not None: - ccm_cluster.stop() + remove_cluster() class SSLConnectionTests(unittest.TestCase): - def test_ssl_connection(self): + def test_can_connect_with_ssl_ca(self): """ - Test to validate that we are able to connect to a cluster using ssl. - - test_ssl_connection Performs a simple sanity check to ensure that we can connect to a cluster with ssl. + Test to validate that we are able to connect to a cluster using ssl. + test_can_connect_with_ssl_ca performs a simple sanity check to ensure that we can connect to a cluster with ssl + authentication via simple server-side shared certificate authority. The client is able to validate the identity + of the server, however by using this method the server can't trust the client unless additional authentication + has been provided. @since 2.6.0 @jira_ticket PYTHON-332 - @expected_result we can connect and preform some basic operations + @expected_result The client can connect via SSL and preform some basic operations @test_category connection:ssl """ @@ -88,9 +80,20 @@ def test_ssl_connection(self): # Setup temporary keyspace. 
abs_path_ca_cert_path = os.path.abspath(DEFAULT_CLIENT_CA_CERTS) - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': abs_path_ca_cert_path, + cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': abs_path_ca_cert_path, 'ssl_version': ssl.PROTOCOL_TLSv1}) - self.session = self.cluster.connect() + tries = 0 + while True: + if tries > 5: + raise RuntimeError("Failed to connect to SSL cluster after 5 attempts") + try: + session = cluster.connect() + break + except Exception: + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + tries += 1 # attempt a few simple commands. insert_keyspace = """CREATE KEYSPACE ssltest @@ -98,14 +101,11 @@ def test_ssl_connection(self): """ statement = SimpleStatement(insert_keyspace) statement.consistency_level = 3 - self.session.execute(statement) + session.execute(statement) drop_keyspace = "DROP KEYSPACE ssltest" - statement = SimpleStatement(drop_keyspace) statement.consistency_level = ConsistencyLevel.ANY - self.session.execute(statement) - - - + session.execute(statement) + cluster.shutdown() \ No newline at end of file From 2cb42b307b3dd5b3b4bec7239cd8b7bc3039db47 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 18 Jun 2015 11:29:20 -0500 Subject: [PATCH 1528/3726] benchmarks: Raise log level for cassandra package Reduce node discovery noise in default output. 
--- benchmarks/base.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/benchmarks/base.py b/benchmarks/base.py index 3d02a150c2..65df52864f 100644 --- a/benchmarks/base.py +++ b/benchmarks/base.py @@ -36,6 +36,8 @@ handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s")) log.addHandler(handler) +logging.getLogger('cassandra').setLevel(logging.WARN) + have_libev = False supported_reactors = [AsyncoreConnection] try: From d7929bcd2e0a865c758d001578d36cb3cbff5fbf Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Thu, 18 Jun 2015 16:50:54 -0700 Subject: [PATCH 1529/3726] more fixes for jenkins test stabilization --- tests/integration/long/test_failure_types.py | 17 +++++++++++++++-- tests/integration/long/test_ssl.py | 4 ++-- tests/integration/standard/test_cluster.py | 2 +- tests/integration/standard/test_metadata.py | 6 +++--- 4 files changed, 21 insertions(+), 8 deletions(-) diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index 64d06f46a3..30e05b60b6 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -81,6 +81,19 @@ def tearDown(self): # Restart the nodes to fully functional again self.setFailingNodes(failing_nodes, "testksfail") + def execute_helper(self, session, query): + tries = 0 + while tries < 100: + try: + return session.execute(query) + except OperationTimedOut: + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + tries += 1 + + raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query)) + def execute_concurrent_args_helper(self, session, query, params): tries = 0 while tries < 100: @@ -129,10 +142,10 @@ def _perform_cql_statement(self, text, consistency_level, expected_exception): statement.consistency_level = consistency_level if expected_exception is None: - 
self.session.execute(statement) + self.execute_helper(self.session, statement) else: with self.assertRaises(expected_exception): - self.session.execute(statement) + self.execute_helper(self.session, statement) def test_write_failures_from_coordinator(self): """ diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index d7cfc5a841..931e6d112c 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -80,13 +80,13 @@ def test_can_connect_with_ssl_ca(self): # Setup temporary keyspace. abs_path_ca_cert_path = os.path.abspath(DEFAULT_CLIENT_CA_CERTS) - cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': abs_path_ca_cert_path, - 'ssl_version': ssl.PROTOCOL_TLSv1}) tries = 0 while True: if tries > 5: raise RuntimeError("Failed to connect to SSL cluster after 5 attempts") try: + cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': abs_path_ca_cert_path, + 'ssl_version': ssl.PROTOCOL_TLSv1}) session = cluster.connect() break except Exception: diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index df41b67ed1..b8439c4de3 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -352,7 +352,7 @@ def test_refresh_schema_type(self): current_test1rf_meta = current_meta[keyspace_name] current_type_meta = current_test1rf_meta.user_types[type_name] self.assertIs(original_meta, current_meta) - self.assertIs(original_test1rf_meta, current_test1rf_meta) + self.assertEqual(original_test1rf_meta.export_as_string(), current_test1rf_meta.export_as_string()) self.assertIsNot(original_type_meta, current_type_meta) self.assertEqual(original_type_meta.as_cql_query(), current_type_meta.as_cql_query()) session.shutdown() diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index d9b67e9963..de89e8c507 100644 --- 
a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -255,12 +255,12 @@ def test_composite_in_compound_primary_key_ordering(self): def test_indexes(self): create_statement = self.make_create_statement(["a"], ["b", "c"], ["d", "e", "f"]) create_statement += " WITH CLUSTERING ORDER BY (b ASC, c ASC)" - self.session.execute(create_statement) + execute_until_pass(self.session, create_statement) d_index = "CREATE INDEX d_index ON %s.%s (d)" % (self.ksname, self.cfname) e_index = "CREATE INDEX e_index ON %s.%s (e)" % (self.ksname, self.cfname) - self.session.execute(d_index) - self.session.execute(e_index) + execute_until_pass(self.session, d_index) + execute_until_pass(self.session, e_index) tablemeta = self.get_table_metadata() statements = tablemeta.export_as_string().strip() From 93331e0e2243a3d21d3cd515757ce8ebadb7047d Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Thu, 18 Jun 2015 18:58:44 -0700 Subject: [PATCH 1530/3726] a few more test stabilizations --- tests/integration/standard/test_types.py | 8 ++++---- tests/integration/standard/test_udts.py | 18 +++++++++--------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 3dd87e82b9..f5c148ddd4 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -261,7 +261,7 @@ def test_can_insert_empty_strings_and_nulls(self): if datatype in string_types: string_columns.add(col_name) - s.execute("CREATE TABLE all_empty ({0})".format(', '.join(alpha_type_list))) + execute_until_pass(s, "CREATE TABLE all_empty ({0})".format(', '.join(alpha_type_list))) # verify all types initially null with simple statement columns_string = ','.join(col_names) @@ -349,11 +349,11 @@ def test_can_insert_empty_values_for_int32(self): """ s = self.session - s.execute("CREATE TABLE empty_values (a text PRIMARY KEY, b int)") - s.execute("INSERT 
INTO empty_values (a, b) VALUES ('a', blobAsInt(0x))") + execute_until_pass(s, "CREATE TABLE empty_values (a text PRIMARY KEY, b int)") + execute_until_pass(s, "INSERT INTO empty_values (a, b) VALUES ('a', blobAsInt(0x))") try: Int32Type.support_empty_values = True - results = s.execute("SELECT b FROM empty_values WHERE a='a'")[0] + results = execute_until_pass(s, "SELECT b FROM empty_values WHERE a='a'")[0] self.assertIs(EMPTY, results.b) finally: Int32Type.support_empty_values = False diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 7d9eb99173..eeef0508ba 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -326,21 +326,21 @@ def test_can_insert_udts_with_varying_lengths(self): def nested_udt_schema_helper(self, session, MAX_NESTING_DEPTH): # create the seed udt - session.execute("CREATE TYPE depth_0 (age int, name text)") + execute_until_pass(session, "CREATE TYPE depth_0 (age int, name text)") # create the nested udts for i in range(MAX_NESTING_DEPTH): - session.execute("CREATE TYPE depth_{} (value frozen)".format(i + 1, i)) + execute_until_pass(session, "CREATE TYPE depth_{} (value frozen)".format(i + 1, i)) # create a table with multiple sizes of nested udts # no need for all nested types, only a spot checked few and the largest one - session.execute("CREATE TABLE mytable (" - "k int PRIMARY KEY, " - "v_0 frozen, " - "v_1 frozen, " - "v_2 frozen, " - "v_3 frozen, " - "v_{0} frozen)".format(MAX_NESTING_DEPTH)) + execute_until_pass(session, "CREATE TABLE mytable (" + "k int PRIMARY KEY, " + "v_0 frozen, " + "v_1 frozen, " + "v_2 frozen, " + "v_3 frozen, " + "v_{0} frozen)".format(MAX_NESTING_DEPTH)) def nested_udt_creation_helper(self, udts, i): if i == 0: From ee2243c217e6ada6275845f3cb8f5c3f8a7e8e7b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 19 Jun 2015 10:08:48 -0500 Subject: [PATCH 1531/3726] Revert "Revert "Merge pull request #298 from 
datastax/PYTHON-108"" This reverts commit dfa91b8bd5d6dcfe13ce3f4caf4eb721a7dd3d18. Conflicts: cassandra/cluster.py cassandra/connection.py cassandra/io/asyncorereactor.py cassandra/io/eventletreactor.py cassandra/io/geventreactor.py cassandra/io/libevreactor.py cassandra/io/twistedreactor.py cassandra/query.py --- cassandra/cluster.py | 92 +++++++++++--------- cassandra/connection.py | 105 +++++++++++++++++++++-- cassandra/io/asyncorereactor.py | 62 +++++--------- cassandra/io/eventletreactor.py | 55 +++++++----- cassandra/io/geventreactor.py | 55 +++++++----- cassandra/io/libevreactor.py | 60 ++++++++----- cassandra/io/libevwrapper.c | 133 +++++++++++++++++++++++++++++ cassandra/io/twistedreactor.py | 61 +++++++------ cassandra/query.py | 6 +- docs/object_mapper.rst | 2 +- tests/unit/test_connection.py | 3 - tests/unit/test_response_future.py | 22 ++--- 12 files changed, 460 insertions(+), 196 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 828fa8266b..45784c4623 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1442,8 +1442,7 @@ class Session(object): """ A default timeout, measured in seconds, for queries executed through :meth:`.execute()` or :meth:`.execute_async()`. This default may be - overridden with the `timeout` parameter for either of those methods - or the `timeout` parameter for :meth:`.ResponseFuture.result()`. + overridden with the `timeout` parameter for either of those methods. Setting this to :const:`None` will cause no timeouts to be set by default. @@ -1581,17 +1580,14 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_ If `query` is a Statement with its own custom_payload. The message payload will be a union of the two, with the values specified here taking precedence. 
""" - if timeout is _NOT_SET: - timeout = self.default_timeout - if trace and not isinstance(query, Statement): raise TypeError( "The query argument must be an instance of a subclass of " "cassandra.query.Statement when trace=True") - future = self.execute_async(query, parameters, trace, custom_payload) + future = self.execute_async(query, parameters, trace, custom_payload, timeout) try: - result = future.result(timeout) + result = future.result() finally: if trace: try: @@ -1601,7 +1597,7 @@ def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_ return result - def execute_async(self, query, parameters=None, trace=False, custom_payload=None): + def execute_async(self, query, parameters=None, trace=False, custom_payload=None, timeout=_NOT_SET): """ Execute the given query and return a :class:`~.ResponseFuture` object which callbacks may be attached to for asynchronous response @@ -1646,11 +1642,14 @@ def execute_async(self, query, parameters=None, trace=False, custom_payload=None ... 
log.exception("Operation failed:") """ - future = self._create_response_future(query, parameters, trace, custom_payload) + if timeout is _NOT_SET: + timeout = self.default_timeout + + future = self._create_response_future(query, parameters, trace, custom_payload, timeout) future.send_request() return future - def _create_response_future(self, query, parameters, trace, custom_payload): + def _create_response_future(self, query, parameters, trace, custom_payload, timeout): """ Returns the ResponseFuture before calling send_request() on it """ prepared_statement = None @@ -1704,7 +1703,7 @@ def _create_response_future(self, query, parameters, trace, custom_payload): message.update_custom_payload(custom_payload) return ResponseFuture( - self, message, query, self.default_timeout, metrics=self._metrics, + self, message, query, timeout, metrics=self._metrics, prepared_statement=prepared_statement) def prepare(self, query, custom_payload=None): @@ -1737,11 +1736,10 @@ def prepare(self, query, custom_payload=None): message. See :ref:`custom_payload`. 
""" message = PrepareMessage(query=query) - message.custom_payload = custom_payload - future = ResponseFuture(self, message, query=None) + future = ResponseFuture(self, message, query=None, timeout=self.default_timeout) try: future.send_request() - query_id, column_metadata, pk_indexes = future.result(self.default_timeout) + query_id, column_metadata, pk_indexes = future.result() except Exception: log.exception("Error preparing query:") raise @@ -1767,7 +1765,7 @@ def prepare_on_all_hosts(self, query, excluded_host): futures = [] for host in self._pools.keys(): if host != excluded_host and host.is_up: - future = ResponseFuture(self, PrepareMessage(query=query), None) + future = ResponseFuture(self, PrepareMessage(query=query), None, self.default_timeout) # we don't care about errors preparing against specific hosts, # since we can always prepare them as needed when the prepared @@ -1788,7 +1786,7 @@ def prepare_on_all_hosts(self, query, excluded_host): for host, future in futures: try: - future.result(self.default_timeout) + future.result() except Exception: log.exception("Error preparing query for host %s:", host) @@ -2832,13 +2830,14 @@ class ResponseFuture(object): _paging_state = None _custom_payload = None _warnings = None + _timer = None - def __init__(self, session, message, query, default_timeout=None, metrics=None, prepared_statement=None): + def __init__(self, session, message, query, timeout, metrics=None, prepared_statement=None): self.session = session self.row_factory = session.row_factory self.message = message self.query = query - self.default_timeout = default_timeout + self.timeout = timeout self._metrics = metrics self.prepared_statement = prepared_statement self._callback_lock = Lock() @@ -2849,6 +2848,18 @@ def __init__(self, session, message, query, default_timeout=None, metrics=None, self._errors = {} self._callbacks = [] self._errbacks = [] + self._start_timer() + + def _start_timer(self): + if self.timeout is not None: + self._timer = 
self.session.cluster.connection_class.create_timer(self.timeout, self._on_timeout) + + def _cancel_timer(self): + if self._timer: + self._timer.cancel() + + def _on_timeout(self): + self._set_final_exception(OperationTimedOut(self._errors, self._current_host)) def _make_query_plan(self): # convert the list/generator/etc to an iterator so that subsequent @@ -2973,6 +2984,7 @@ def start_fetching_next_page(self): self._event.clear() self._final_result = _NOT_SET self._final_exception = None + self._start_timer() self.send_request() def _reprepare(self, prepare_message): @@ -3187,6 +3199,7 @@ def _execute_after_prepare(self, response): "statement on host %s: %s" % (self._current_host, response))) def _set_final_result(self, response): + self._cancel_timer() if self._metrics is not None: self._metrics.request_timer.addValue(time.time() - self._start_time) @@ -3201,6 +3214,7 @@ def _set_final_result(self, response): fn(response, *args, **kwargs) def _set_final_exception(self, response): + self._cancel_timer() if self._metrics is not None: self._metrics.request_timer.addValue(time.time() - self._start_time) @@ -3244,6 +3258,11 @@ def result(self, timeout=_NOT_SET): encountered. If the final result or error has not been set yet, this method will block until that time. + .. versionchanged:: 2.6.0 + + **`timeout` is deprecated. Use timeout in the Session execute functions instead. + The following description applies to deprecated behavior:** + You may set a timeout (in seconds) with the `timeout` parameter. By default, the :attr:`~.default_timeout` for the :class:`.Session` this was created through will be used for the timeout on this @@ -3257,11 +3276,6 @@ def result(self, timeout=_NOT_SET): This is a client-side timeout. For more information about server-side coordinator timeouts, see :class:`.policies.RetryPolicy`. 
- **Important**: This timeout currently has no effect on callbacks registered - on a :class:`~.ResponseFuture` through :meth:`.ResponseFuture.add_callback` or - :meth:`.ResponseFuture.add_errback`; even if a query exceeds this default - timeout, neither the registered callback or errback will be called. - Example usage:: >>> future = session.execute_async("SELECT * FROM mycf") @@ -3275,27 +3289,24 @@ def result(self, timeout=_NOT_SET): ... log.exception("Operation failed:") """ - if timeout is _NOT_SET: - timeout = self.default_timeout + if timeout is not _NOT_SET: + msg = "ResponseFuture.result timeout argument is deprecated. Specify the request timeout via Session.execute[_async]." + warnings.warn(msg, DeprecationWarning) + log.warning(msg) + else: + timeout = None + self._event.wait(timeout) + # TODO: remove this conditional when deprecated timeout parameter is removed + if not self._event.is_set(): + self._on_timeout() if self._final_result is not _NOT_SET: if self._paging_state is None: return self._final_result else: - return PagedResult(self, self._final_result, timeout) - elif self._final_exception: - raise self._final_exception + return PagedResult(self, self._final_result) else: - self._event.wait(timeout=timeout) - if self._final_result is not _NOT_SET: - if self._paging_state is None: - return self._final_result - else: - return PagedResult(self, self._final_result, timeout) - elif self._final_exception: - raise self._final_exception - else: - raise OperationTimedOut(errors=self._errors, last_host=self._current_host) + raise self._final_exception def get_query_trace(self, max_wait=None): """ @@ -3450,10 +3461,9 @@ class will be returned. 
response_future = None - def __init__(self, response_future, initial_response, timeout=_NOT_SET): + def __init__(self, response_future, initial_response): self.response_future = response_future self.current_response = iter(initial_response) - self.timeout = timeout def __iter__(self): return self @@ -3466,7 +3476,7 @@ def next(self): raise self.response_future.start_fetching_next_page() - result = self.response_future.result(self.timeout) + result = self.response_future.result() if self.response_future.has_more_pages: self.current_response = result.current_response else: diff --git a/cassandra/connection.py b/cassandra/connection.py index 464ec792ba..95858bd8c1 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -16,6 +16,7 @@ from collections import defaultdict, deque, namedtuple import errno from functools import wraps, partial +from heapq import heappush, heappop import io import logging import socket @@ -44,7 +45,8 @@ QueryMessage, ResultMessage, decode_response, InvalidRequestException, SupportedMessage, AuthResponseMessage, AuthChallengeMessage, - AuthSuccessMessage, ProtocolException, MAX_SUPPORTED_VERSION) + AuthSuccessMessage, ProtocolException, + MAX_SUPPORTED_VERSION, RegisterMessage) from cassandra.util import OrderedDict @@ -219,6 +221,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self.is_control_connection = is_control_connection self.user_type_map = user_type_map self._push_watchers = defaultdict(set) + self._callbacks = {} self._iobuf = io.BytesIO() if protocol_version >= 3: @@ -233,6 +236,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self.highest_request_id = self.max_request_id self.lock = RLock() + self.connected_event = Event() @classmethod def initialize_reactor(self): @@ -250,6 +254,10 @@ def handle_fork(self): """ pass + @classmethod + def create_timer(cls, timeout, callback): + raise NotImplementedError() + @classmethod def factory(cls, host, timeout, *args, **kwargs): 
""" @@ -407,11 +415,24 @@ def wait_for_responses(self, *msgs, **kwargs): self.defunct(exc) raise - def register_watcher(self, event_type, callback): - raise NotImplementedError() + def register_watcher(self, event_type, callback, register_timeout=None): + """ + Register a callback for a given event type. + """ + self._push_watchers[event_type].add(callback) + self.wait_for_response( + RegisterMessage(event_list=[event_type]), + timeout=register_timeout) - def register_watchers(self, type_callback_dict): - raise NotImplementedError() + def register_watchers(self, type_callback_dict, register_timeout=None): + """ + Register multiple callback/event type pairs, expressed as a dict. + """ + for event_type, callback in type_callback_dict.items(): + self._push_watchers[event_type].add(callback) + self.wait_for_response( + RegisterMessage(event_list=type_callback_dict.keys()), + timeout=register_timeout) def control_conn_disposed(self): self.is_control_connection = False @@ -907,3 +928,77 @@ def stop(self): def _raise_if_stopped(self): if self._shutdown_event.is_set(): raise self.ShutdownException() + + +class Timer(object): + + canceled = False + + def __init__(self, timeout, callback): + self.end = time.time() + timeout + self.callback = callback + if timeout < 0: + self.callback() + + def cancel(self): + self.canceled = True + + def finish(self, time_now): + if self.canceled: + return True + + if time_now >= self.end: + self.callback() + return True + + return False + + +class TimerManager(object): + + def __init__(self): + self._queue = [] + self._new_timers = [] + + def add_timer(self, timer): + """ + called from client thread with a Timer object + """ + self._new_timers.append((timer.end, timer)) + + def service_timeouts(self): + """ + run callbacks on all expired timers + Called from the event thread + :return: next end time, or None + """ + queue = self._queue + new_timers = self._new_timers + while new_timers: + heappush(queue, new_timers.pop()) + + now = 
time.time() + while queue: + try: + timer = queue[0][1] + if timer.finish(now): + heappop(queue) + else: + return timer.end + except Exception: + log.exception("Exception while servicing timeout callback: ") + + @property + def next_timeout(self): + try: + return self._queue[0][0] + except IndexError: + pass + + @property + def next_offset(self): + try: + next_end = self._queue[0][0] + return next_end - time.time() + except IndexError: + pass diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 616b5c0000..fae87a7354 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -19,6 +19,7 @@ import socket import sys from threading import Event, Lock, Thread +import time import weakref from six.moves import range @@ -35,8 +36,9 @@ except ImportError: ssl = None # NOQA -from cassandra.connection import (Connection, ConnectionShutdown, NONBLOCKING) -from cassandra.protocol import RegisterMessage +from cassandra.connection import (Connection, ConnectionShutdown, + ConnectionException, NONBLOCKING, + Timer, TimerManager) log = logging.getLogger(__name__) @@ -52,15 +54,17 @@ def _cleanup(loop_weakref): class AsyncoreLoop(object): + def __init__(self): self._pid = os.getpid() self._loop_lock = Lock() self._started = False self._shutdown = False - self._conns_lock = Lock() - self._conns = WeakSet() self._thread = None + + self._timers = TimerManager() + atexit.register(partial(_cleanup, weakref.ref(self))) def maybe_start(self): @@ -83,24 +87,22 @@ def maybe_start(self): def _run_loop(self): log.debug("Starting asyncore event loop") with self._loop_lock: - while True: + while not self._shutdown: try: - asyncore.loop(timeout=0.001, use_poll=True, count=1000) + asyncore.loop(timeout=0.001, use_poll=True, count=100) + self._timers.service_timeouts() + if not asyncore.socket_map: + time.sleep(0.005) except Exception: log.debug("Asyncore event loop stopped unexepectedly", exc_info=True) break - - if self._shutdown: - 
break - - with self._conns_lock: - if len(self._conns) == 0: - break - self._started = False log.debug("Asyncore event loop ended") + def add_timer(self, timer): + self._timers.add_timer(timer) + def _cleanup(self): self._shutdown = True if not self._thread: @@ -115,14 +117,6 @@ def _cleanup(self): log.debug("Event loop thread was joined") - def connection_created(self, connection): - with self._conns_lock: - self._conns.add(connection) - - def connection_destroyed(self, connection): - with self._conns_lock: - self._conns.discard(connection) - class AsyncoreConnection(Connection, asyncore.dispatcher): """ @@ -152,18 +146,19 @@ def handle_fork(cls): cls._loop._cleanup() cls._loop = None + @classmethod + def create_timer(cls, timeout, callback): + timer = Timer(timeout, callback) + cls._loop.add_timer(timer) + return timer + def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) asyncore.dispatcher.__init__(self) - self.connected_event = Event() - - self._callbacks = {} self.deque = deque() self.deque_lock = Lock() - self._loop.connection_created(self) - self._connect_socket() asyncore.dispatcher.__init__(self, self._socket) @@ -187,8 +182,6 @@ def close(self): asyncore.dispatcher.close(self) log.debug("Closed socket to %s", self.host) - self._loop.connection_destroyed(self) - if not self.is_defunct: self.error_all_callbacks( ConnectionShutdown("Connection to %s was closed" % self.host)) @@ -267,14 +260,3 @@ def writable(self): def readable(self): return self._readable or (self.is_control_connection and not (self.is_defunct or self.is_closed)) - - def register_watcher(self, event_type, callback, register_timeout=None): - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=[event_type]), timeout=register_timeout) - - def register_watchers(self, type_callback_dict, register_timeout=None): - for event_type, callback in type_callback_dict.items(): - self._push_watchers[event_type].add(callback) - 
self.wait_for_response( - RegisterMessage(event_list=type_callback_dict.keys()), timeout=register_timeout) diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index e65d5aa544..b0206f43d2 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -16,7 +16,6 @@ # Originally derived from MagnetoDB source: # https://github.com/stackforge/magnetodb/blob/2015.1.0b1/magnetodb/common/cassandra/io/eventletreactor.py -from collections import defaultdict from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL import eventlet from eventlet.green import select, socket @@ -24,11 +23,12 @@ from functools import partial import logging import os -from six.moves import xrange from threading import Event +import time + +from six.moves import xrange -from cassandra.connection import Connection, ConnectionShutdown -from cassandra.protocol import RegisterMessage +from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager log = logging.getLogger(__name__) @@ -52,19 +52,45 @@ class EventletConnection(Connection): _socket_impl = eventlet.green.socket _ssl_impl = eventlet.green.ssl + _timers = None + _timeout_watcher = None + _new_timer = None + @classmethod def initialize_reactor(cls): eventlet.monkey_patch() + if not cls._timers: + cls._timers = TimerManager() + cls._timeout_watcher = eventlet.spawn(cls.service_timeouts) + cls._new_timer = Event() + + @classmethod + def create_timer(cls, timeout, callback): + timer = Timer(timeout, callback) + cls._timers.add_timer(timer) + cls._new_timer.set() + return timer + + @classmethod + def service_timeouts(cls): + """ + cls._timeout_watcher runs in this loop forever. + It is usually waiting for the next timeout on the cls._new_timer Event. + When new timers are added, that event is set so that the watcher can + wake up and possibly set an earlier timeout. 
+ """ + timer_manager = cls._timers + while True: + next_end = timer_manager.service_timeouts() + sleep_time = max(next_end - time.time(), 0) if next_end else 10000 + cls._new_timer.wait(sleep_time) + cls._new_timer.clear() def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) - self.connected_event = Event() self._write_queue = Queue() - self._callbacks = {} - self._push_watchers = defaultdict(set) - self._connect_socket() self._read_watcher = eventlet.spawn(lambda: self.handle_read()) @@ -142,16 +168,3 @@ def push(self, data): chunk_size = self.out_buffer_size for i in xrange(0, len(data), chunk_size): self._write_queue.put(data[i:i + chunk_size]) - - def register_watcher(self, event_type, callback, register_timeout=None): - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=[event_type]), - timeout=register_timeout) - - def register_watchers(self, type_callback_dict, register_timeout=None): - for event_type, callback in type_callback_dict.items(): - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=type_callback_dict.keys()), - timeout=register_timeout) diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 5d8553cec6..60a3f2d031 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import gevent -from gevent.event import Event +import gevent.event from gevent.queue import Queue from gevent import select, socket import gevent.ssl @@ -21,13 +21,13 @@ from functools import partial import logging import os +import time -from six.moves import xrange +from six.moves import range from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL -from cassandra.connection import Connection, ConnectionShutdown -from cassandra.protocol import RegisterMessage +from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager log = logging.getLogger(__name__) @@ -51,15 +51,39 @@ class GeventConnection(Connection): _socket_impl = gevent.socket _ssl_impl = gevent.ssl + _timers = None + _timeout_watcher = None + _new_timer = None + + @classmethod + def initialize_reactor(cls): + if not cls._timers: + cls._timers = TimerManager() + cls._timeout_watcher = gevent.spawn(cls.service_timeouts) + cls._new_timer = gevent.event.Event() + + @classmethod + def create_timer(cls, timeout, callback): + timer = Timer(timeout, callback) + cls._timers.add_timer(timer) + cls._new_timer.set() + return timer + + @classmethod + def service_timeouts(cls): + timer_manager = cls._timers + timer_event = cls._new_timer + while True: + next_end = timer_manager.service_timeouts() + sleep_time = max(next_end - time.time(), 0) if next_end else 10000 + timer_event.wait(sleep_time) + timer_event.clear() + def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) - self.connected_event = Event() self._write_queue = Queue() - self._callbacks = {} - self._push_watchers = defaultdict(set) - self._connect_socket() self._read_watcher = gevent.spawn(self.handle_read) @@ -142,18 +166,5 @@ def handle_read(self): def push(self, data): chunk_size = self.out_buffer_size - for i in xrange(0, len(data), chunk_size): + for i in range(0, len(data), chunk_size): self._write_queue.put(data[i:i + chunk_size]) - - def register_watcher(self, event_type, callback, 
register_timeout=None): - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=[event_type]), - timeout=register_timeout) - - def register_watchers(self, type_callback_dict, register_timeout=None): - for event_type, callback in type_callback_dict.items(): - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=type_callback_dict.keys()), - timeout=register_timeout) diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 00127a1fa2..af0c3a6015 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -17,13 +17,14 @@ import logging import os import socket -from threading import Event, Lock, Thread +import ssl +from threading import Lock, Thread import weakref -from six.moves import xrange +from six.moves import range -from cassandra.connection import Connection, ConnectionShutdown, NONBLOCKING, ssl -from cassandra.protocol import RegisterMessage +from cassandra.connection import (Connection, ConnectionShutdown, + NONBLOCKING, Timer, TimerManager) try: import cassandra.io.libevwrapper as libev except ImportError: @@ -44,7 +45,6 @@ def _cleanup(loop_weakref): loop = loop_weakref() except ReferenceError: return - loop._cleanup() @@ -79,10 +79,10 @@ def __init__(self): self._loop.unref() self._preparer.start() - atexit.register(partial(_cleanup, weakref.ref(self))) + self._timers = TimerManager() + self._loop_timer = libev.Timer(self._loop, self._on_loop_timer) - def notify(self): - self._notifier.send() + atexit.register(partial(_cleanup, weakref.ref(self))) def maybe_start(self): should_start = False @@ -127,6 +127,7 @@ def _cleanup(self): conn._read_watcher.stop() del conn._read_watcher + self.notify() # wake the timer watcher log.debug("Waiting for event loop thread to join...") self._thread.join(timeout=1.0) if self._thread.is_alive(): @@ -137,6 +138,24 @@ def _cleanup(self): log.debug("Event loop thread was joined") self._loop 
= None + def add_timer(self, timer): + self._timers.add_timer(timer) + self._notifier.send() # wake up in case this timer is earlier + + def _update_timer(self): + if not self._shutdown: + self._timers.service_timeouts() + offset = self._timers.next_offset or 100000 # none pending; will be updated again when something new happens + self._loop_timer.start(offset) + else: + self._loop_timer.stop() + + def _on_loop_timer(self): + self._timers.service_timeouts() + + def notify(self): + self._notifier.send() + def connection_created(self, conn): with self._conn_set_lock: new_live_conns = self._live_conns.copy() @@ -199,6 +218,9 @@ def _loop_will_run(self, prepare): changed = True + # TODO: update to do connection management, timer updates through dedicated async 'notifier' callbacks + self._update_timer() + if changed: self._notifier.send() @@ -229,12 +251,15 @@ def handle_fork(cls): cls._libevloop._cleanup() cls._libevloop = None + @classmethod + def create_timer(cls, timeout, callback): + timer = Timer(timeout, callback) + cls._libevloop.add_timer(timer) + return timer + def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) - self.connected_event = Event() - - self._callbacks = {} self.deque = deque() self._deque_lock = Lock() self._connect_socket() @@ -332,7 +357,7 @@ def push(self, data): sabs = self.out_buffer_size if len(data) > sabs: chunks = [] - for i in xrange(0, len(data), sabs): + for i in range(0, len(data), sabs): chunks.append(data[i:i + sabs]) else: chunks = [data] @@ -340,14 +365,3 @@ def push(self, data): with self._deque_lock: self.deque.extend(chunks) self._libevloop.notify() - - def register_watcher(self, event_type, callback, register_timeout=None): - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=[event_type]), timeout=register_timeout) - - def register_watchers(self, type_callback_dict, register_timeout=None): - for event_type, callback in type_callback_dict.items(): 
- self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=type_callback_dict.keys()), timeout=register_timeout) diff --git a/cassandra/io/libevwrapper.c b/cassandra/io/libevwrapper.c index cbac83b277..99e1df30f7 100644 --- a/cassandra/io/libevwrapper.c +++ b/cassandra/io/libevwrapper.c @@ -451,6 +451,131 @@ static PyTypeObject libevwrapper_PrepareType = { (initproc)Prepare_init, /* tp_init */ }; +typedef struct libevwrapper_Timer { + PyObject_HEAD + struct ev_timer timer; + struct libevwrapper_Loop *loop; + PyObject *callback; +} libevwrapper_Timer; + +static void +Timer_dealloc(libevwrapper_Timer *self) { + Py_XDECREF(self->loop); + Py_XDECREF(self->callback); + Py_TYPE(self)->tp_free((PyObject *)self); +} + +static void timer_callback(struct ev_loop *loop, ev_timer *watcher, int revents) { + libevwrapper_Timer *self = watcher->data; + + PyObject *result = NULL; + PyGILState_STATE gstate; + + gstate = PyGILState_Ensure(); + result = PyObject_CallFunction(self->callback, NULL); + if (!result) { + PyErr_WriteUnraisable(self->callback); + } + Py_XDECREF(result); + + PyGILState_Release(gstate); +} + +static int +Timer_init(libevwrapper_Timer *self, PyObject *args, PyObject *kwds) { + PyObject *callback; + PyObject *loop; + + if (!PyArg_ParseTuple(args, "OO", &loop, &callback)) { + return -1; + } + + if (loop) { + Py_INCREF(loop); + self->loop = (libevwrapper_Loop *)loop; + } else { + return -1; + } + + if (callback) { + if (!PyCallable_Check(callback)) { + PyErr_SetString(PyExc_TypeError, "callback parameter must be callable"); + Py_XDECREF(loop); + return -1; + } + Py_INCREF(callback); + self->callback = callback; + } + ev_init(&self->timer, timer_callback); + self->timer.data = self; + return 0; +} + +static PyObject * +Timer_start(libevwrapper_Timer *self, PyObject *args) { + double timeout; + if (!PyArg_ParseTuple(args, "d", &timeout)) { + return NULL; + } + /* some tiny non-zero number to avoid zero, and + make it run 
immediately for negative timeouts */ + self->timer.repeat = fmax(timeout, 0.000000001); + ev_timer_again(self->loop->loop, &self->timer); + Py_RETURN_NONE; +} + +static PyObject * +Timer_stop(libevwrapper_Timer *self, PyObject *args) { + ev_timer_stop(self->loop->loop, &self->timer); + Py_RETURN_NONE; +} + +static PyMethodDef Timer_methods[] = { + {"start", (PyCFunction)Timer_start, METH_VARARGS, "Start the Timer watcher"}, + {"stop", (PyCFunction)Timer_stop, METH_NOARGS, "Stop the Timer watcher"}, + {NULL} /* Sentinal */ +}; + +static PyTypeObject libevwrapper_TimerType = { + PyVarObject_HEAD_INIT(NULL, 0) + "cassandra.io.libevwrapper.Timer", /*tp_name*/ + sizeof(libevwrapper_Timer), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + (destructor)Timer_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/ + "Timer objects", /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + Timer_methods, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)Timer_init, /* tp_init */ +}; + + static PyMethodDef module_methods[] = { {NULL} /* Sentinal */ }; @@ -500,6 +625,10 @@ initlibevwrapper(void) if (PyType_Ready(&libevwrapper_AsyncType) < 0) INITERROR; + libevwrapper_TimerType.tp_new = PyType_GenericNew; + if (PyType_Ready(&libevwrapper_TimerType) < 0) + INITERROR; + # if PY_MAJOR_VERSION >= 3 module = PyModule_Create(&moduledef); # else @@ -532,6 +661,10 @@ initlibevwrapper(void) if (PyModule_AddObject(module, "Async", (PyObject *)&libevwrapper_AsyncType) == 
-1) INITERROR; + Py_INCREF(&libevwrapper_TimerType); + if (PyModule_AddObject(module, "Timer", (PyObject *)&libevwrapper_TimerType) == -1) + INITERROR; + if (!PyEval_ThreadsInitialized()) { PyEval_InitThreads(); } diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index 1de8de1554..b02fb6ab32 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -15,15 +15,15 @@ Module that implements an event loop based on twisted ( https://twistedmatrix.com ). """ -from twisted.internet import reactor, protocol -from threading import Event, Thread, Lock +import atexit from functools import partial import logging +from threading import Thread, Lock +import time +from twisted.internet import reactor, protocol import weakref -import atexit -from cassandra.connection import Connection, ConnectionShutdown -from cassandra.protocol import RegisterMessage +from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager log = logging.getLogger(__name__) @@ -108,9 +108,12 @@ class TwistedLoop(object): _lock = None _thread = None + _timeout_task = None + _timeout = None def __init__(self): self._lock = Lock() + self._timers = TimerManager() def maybe_start(self): with self._lock: @@ -132,6 +135,27 @@ def _cleanup(self): "Cluster.shutdown() to avoid this.") log.debug("Event loop thread was joined") + def add_timer(self, timer): + self._timers.add_timer(timer) + # callFromThread to schedule from the loop thread, where + # the timeout task can safely be modified + reactor.callFromThread(self._schedule_timeout, timer.end) + + def _schedule_timeout(self, next_timeout): + if next_timeout: + delay = max(next_timeout - time.time(), 0) + if self._timeout_task and self._timeout_task.active(): + if next_timeout < self._timeout: + self._timeout_task.reset(delay) + self._timeout = next_timeout + else: + self._timeout_task = reactor.callLater(delay, self._on_loop_timer) + self._timeout = next_timeout + + def 
_on_loop_timer(self): + self._timers.service_timeouts() + self._schedule_timeout(self._timers.next_timeout) + class TwistedConnection(Connection): """ @@ -146,6 +170,12 @@ def initialize_reactor(cls): if not cls._loop: cls._loop = TwistedLoop() + @classmethod + def create_timer(cls, timeout, callback): + timer = Timer(timeout, callback) + cls._loop.add_timer(timer) + return timer + def __init__(self, *args, **kwargs): """ Initialization method. @@ -157,11 +187,9 @@ def __init__(self, *args, **kwargs): """ Connection.__init__(self, *args, **kwargs) - self.connected_event = Event() self.is_closed = True self.connector = None - self._callbacks = {} reactor.callFromThread(self.add_connection) self._loop.maybe_start() @@ -218,22 +246,3 @@ def push(self, data): the event loop when it gets the chance. """ reactor.callFromThread(self.connector.transport.write, data) - - def register_watcher(self, event_type, callback, register_timeout=None): - """ - Register a callback for a given event type. - """ - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=[event_type]), - timeout=register_timeout) - - def register_watchers(self, type_callback_dict, register_timeout=None): - """ - Register multiple callback/event type pairs, expressed as a dict. 
- """ - for event_type, callback in type_callback_dict.items(): - self._push_watchers[event_type].add(callback) - self.wait_for_response( - RegisterMessage(event_list=type_callback_dict.keys()), - timeout=register_timeout) diff --git a/cassandra/query.py b/cassandra/query.py index 6709cbeff6..21b668dfb3 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -917,14 +917,14 @@ def populate(self, max_wait=2.0): break def _execute(self, query, parameters, time_spent, max_wait): + timeout = (max_wait - time_spent) if max_wait is not None else None + future = self._session._create_response_future(query, parameters, trace=False, custom_payload=None, timeout=timeout) # in case the user switched the row factory, set it to namedtuple for this query - future = self._session._create_response_future(query, parameters, trace=False, custom_payload=None) future.row_factory = named_tuple_factory future.send_request() - timeout = (max_wait - time_spent) if max_wait is not None else None try: - return future.result(timeout=timeout) + return future.result() except OperationTimedOut: raise TraceUnavailable("Trace information was not available within %f seconds" % (max_wait,)) diff --git a/docs/object_mapper.rst b/docs/object_mapper.rst index 26d78a0964..4e38994064 100644 --- a/docs/object_mapper.rst +++ b/docs/object_mapper.rst @@ -48,7 +48,7 @@ Getting Started from cassandra.cqlengine import columns from cassandra.cqlengine import connection from datetime import datetime - from cassandra.cqlengine.management import create_keyspace, sync_table + from cassandra.cqlengine.management import sync_table from cassandra.cqlengine.models import Model #first, define a model diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index e5be60759d..268e19d535 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -244,10 +244,7 @@ def test_not_implemented(self): Ensure the following methods throw NIE's. If not, come back and test them. 
""" c = self.make_connection() - self.assertRaises(NotImplementedError, c.close) - self.assertRaises(NotImplementedError, c.register_watcher, None, None) - self.assertRaises(NotImplementedError, c.register_watchers, None) def test_set_keyspace_blocking(self): c = self.make_connection() diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 92351a9d1d..eea43f7586 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -47,7 +47,7 @@ def make_session(self): def make_response_future(self, session): query = SimpleStatement("SELECT * FROM foo") message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - return ResponseFuture(session, message, query) + return ResponseFuture(session, message, query, 1) def make_mock_response(self, results): return Mock(spec=ResultMessage, kind=RESULT_KIND_ROWS, results=results, paging_state=None) @@ -122,7 +122,7 @@ def test_read_timeout_error_message(self): query.retry_policy.on_read_timeout.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query) + rf = ResponseFuture(session, message, query, 1) rf.send_request() result = Mock(spec=ReadTimeoutErrorMessage, info={}) @@ -137,7 +137,7 @@ def test_write_timeout_error_message(self): query.retry_policy.on_write_timeout.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query) + rf = ResponseFuture(session, message, query, 1) rf.send_request() result = Mock(spec=WriteTimeoutErrorMessage, info={}) @@ -151,7 +151,7 @@ def test_unavailable_error_message(self): query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query) + rf = ResponseFuture(session, message, 
query, 1) rf.send_request() result = Mock(spec=UnavailableErrorMessage, info={}) @@ -165,7 +165,7 @@ def test_retry_policy_says_ignore(self): query.retry_policy.on_unavailable.return_value = (RetryPolicy.IGNORE, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query) + rf = ResponseFuture(session, message, query, 1) rf.send_request() result = Mock(spec=UnavailableErrorMessage, info={}) @@ -184,7 +184,7 @@ def test_retry_policy_says_retry(self): connection = Mock(spec=Connection) pool.borrow_connection.return_value = (connection, 1) - rf = ResponseFuture(session, message, query) + rf = ResponseFuture(session, message, query, 1) rf.send_request() rf.session._pools.get.assert_called_once_with('ip1') @@ -279,7 +279,7 @@ def test_all_pools_shutdown(self): session._load_balancer.make_query_plan.return_value = ['ip1', 'ip2'] session._pools.get.return_value.is_shutdown = True - rf = ResponseFuture(session, Mock(), Mock()) + rf = ResponseFuture(session, Mock(), Mock(), 1) rf.send_request() self.assertRaises(NoHostAvailable, rf.result) @@ -354,7 +354,7 @@ def test_errback(self): query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query) + rf = ResponseFuture(session, message, query, 1) rf.send_request() rf.add_errback(self.assertIsInstance, Exception) @@ -401,7 +401,7 @@ def test_multiple_errbacks(self): query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None) message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) - rf = ResponseFuture(session, message, query) + rf = ResponseFuture(session, message, query, 1) rf.send_request() callback = Mock() @@ -431,7 +431,7 @@ def test_add_callbacks(self): message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) # test errback - rf = ResponseFuture(session, message, 
query) + rf = ResponseFuture(session, message, query, 1) rf.send_request() rf.add_callbacks( @@ -443,7 +443,7 @@ def test_add_callbacks(self): self.assertRaises(Exception, rf.result) # test callback - rf = ResponseFuture(session, message, query) + rf = ResponseFuture(session, message, query, 1) rf.send_request() callback = Mock() From f779da72d8374c7306ccdf81cc731af2a5323381 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 19 Jun 2015 13:01:10 -0500 Subject: [PATCH 1532/3726] For callback chaining, use old pattern of no-timeouts Avoids the overhead of timer management when using the callback chaining pattern. --- benchmarks/callback_full_pipeline.py | 2 +- cassandra/concurrent.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/benchmarks/callback_full_pipeline.py b/benchmarks/callback_full_pipeline.py index 707d127c9b..7baa919494 100644 --- a/benchmarks/callback_full_pipeline.py +++ b/benchmarks/callback_full_pipeline.py @@ -42,7 +42,7 @@ def insert_next(self, previous_result=sentinel): self.event.set() if next(self.num_started) <= self.num_queries: - future = self.session.execute_async(self.query, self.values) + future = self.session.execute_async(self.query, self.values, timeout=None) future.add_callbacks(self.insert_next, self.insert_next) def run(self): diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index 8d7743e1b4..b96c921bd2 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -138,7 +138,7 @@ def _handle_error(error, result_index, event, session, statements, results, return try: - future = session.execute_async(statement, params) + future = session.execute_async(statement, params, timeout=None) args = (next_index, event, session, statements, results, future, num_finished, to_execute, first_error) future.add_callbacks( callback=_execute_next, callback_args=args, @@ -176,7 +176,7 @@ def _execute_next(result, result_index, event, session, statements, results, return try: - future = 
session.execute_async(statement, params) + future = session.execute_async(statement, params, timeout=None) args = (next_index, event, session, statements, results, future, num_finished, to_execute, first_error) future.add_callbacks( callback=_execute_next, callback_args=args, From a327ab13a56077e007f4f74de99b950b99d66f73 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 19 Jun 2015 13:12:02 -0500 Subject: [PATCH 1533/3726] micro-optimize no timer case --- cassandra/connection.py | 38 +++++++++++++++--------------------- cassandra/io/libevreactor.py | 7 ++++--- 2 files changed, 20 insertions(+), 25 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 95858bd8c1..20b69c117c 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -973,20 +973,22 @@ def service_timeouts(self): :return: next end time, or None """ queue = self._queue - new_timers = self._new_timers - while new_timers: - heappush(queue, new_timers.pop()) - - now = time.time() - while queue: - try: - timer = queue[0][1] - if timer.finish(now): - heappop(queue) - else: - return timer.end - except Exception: - log.exception("Exception while servicing timeout callback: ") + if self._new_timers: + new_timers = self._new_timers + while new_timers: + heappush(queue, new_timers.pop()) + + if queue: + now = time.time() + while queue: + try: + timer = queue[0][1] + if timer.finish(now): + heappop(queue) + else: + return timer.end + except Exception: + log.exception("Exception while servicing timeout callback: ") @property def next_timeout(self): @@ -994,11 +996,3 @@ def next_timeout(self): return self._queue[0][0] except IndexError: pass - - @property - def next_offset(self): - try: - next_end = self._queue[0][0] - return next_end - time.time() - except IndexError: - pass diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index af0c3a6015..9114af133b 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -19,6 +19,7 @@ 
import socket import ssl from threading import Lock, Thread +import time import weakref from six.moves import range @@ -144,9 +145,9 @@ def add_timer(self, timer): def _update_timer(self): if not self._shutdown: - self._timers.service_timeouts() - offset = self._timers.next_offset or 100000 # none pending; will be updated again when something new happens - self._loop_timer.start(offset) + next_end = self._timers.service_timeouts() + if next_end: + self._loop_timer.start(next_end - time.time()) # timer handles negative values else: self._loop_timer.stop() From 1ffd5190a77e59f15c1037c70ce60872b85d07b9 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 22 Jun 2015 09:55:06 -0500 Subject: [PATCH 1534/3726] For meta export, don't copy in default min/max compaction thresholds Fixes an issue where table meta export would contain min/max_threshold settings, even for compaction strategies that do not accept those. Resulting CQL would cause "ConfigurationException ... code=2300 ... Properties specified [min_threshold, max_threshold] are not understood by [some compaction strategy]" --- cassandra/metadata.py | 15 +++----- tests/integration/standard/test_metadata.py | 42 ++++++++++++++++----- 2 files changed, 38 insertions(+), 19 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 8d550cee41..1ae5ea11ac 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1207,7 +1207,6 @@ def primary_key(self): "rows_per_partition_to_cache", "memtable_flush_period_in_ms", "populate_io_cache_on_flush", - "compaction", "compression", "default_time_to_live") @@ -1350,17 +1349,13 @@ def as_cql_query(self, formatted=False): def _make_option_strings(self): ret = [] options_copy = dict(self.options.items()) - if not options_copy.get('compaction'): - options_copy.pop('compaction', None) - actual_options = json.loads(options_copy.pop('compaction_strategy_options', '{}')) - for system_table_name, compact_option_name in self.compaction_options.items(): - 
value = options_copy.pop(system_table_name, None) - if value: - actual_options.setdefault(compact_option_name, value) + actual_options = json.loads(options_copy.pop('compaction_strategy_options', '{}')) + value = options_copy.pop("compaction_strategy_class", None) + actual_options.setdefault("class", value) - compaction_option_strings = ["'%s': '%s'" % (k, v) for k, v in actual_options.items()] - ret.append('compaction = {%s}' % ', '.join(compaction_option_strings)) + compaction_option_strings = ["'%s': '%s'" % (k, v) for k, v in actual_options.items()] + ret.append('compaction = {%s}' % ', '.join(compaction_option_strings)) for system_table_name in self.compaction_options.keys(): options_copy.pop(system_table_name, None) # delete if present diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index de89e8c507..611dce2307 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -313,6 +313,30 @@ def test_compression_disabled(self): tablemeta = self.get_table_metadata() self.assertIn("compression = {}", tablemeta.export_as_string()) + def test_non_size_tiered_compaction(self): + """ + test options for non-size-tiered compaction strategy + + Creates a table with LeveledCompactionStrategy, specifying one non-default option. Verifies that the option is + present in generated CQL, and that other legacy table parameters (min_threshold, max_threshold) are not included. 
+ + @since 2.6.0 + @jira_ticket PYTHON-352 + @expected_result the options map for LeveledCompactionStrategy does not contain min_threshold, max_threshold + + @test_category metadata + """ + create_statement = self.make_create_statement(["a"], [], ["b", "c"]) + create_statement += "WITH COMPACTION = {'class': 'LeveledCompactionStrategy', 'tombstone_threshold': '0.3'}" + self.session.execute(create_statement) + + table_meta = self.get_table_metadata() + cql = table_meta.export_as_string() + self.assertIn("'tombstone_threshold': '0.3'", cql) + self.assertIn("LeveledCompactionStrategy", cql) + self.assertNotIn("min_threshold", cql) + self.assertNotIn("max_threshold", cql) + def test_refresh_schema_metadata(self): """ test for synchronously refreshing all cluster metadata @@ -670,7 +694,7 @@ def test_export_keyspace_schema_udts(self): ) WITH bloom_filter_fp_chance = 0.01 AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = '' - AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND dclocal_read_repair_chance = 0.1 AND default_time_to_live = 0 @@ -691,7 +715,7 @@ def test_export_keyspace_schema_udts(self): ) WITH bloom_filter_fp_chance = 0.01 AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = '' - AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND dclocal_read_repair_chance = 0.1 AND default_time_to_live = 0 @@ -926,7 +950,7 @@ def test_legacy_tables(self): AND CLUSTERING ORDER BY 
(t ASC, b ASC, s ASC) AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = 'Stores file meta data' - AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND dclocal_read_repair_chance = 0.1 AND default_time_to_live = 0 @@ -948,7 +972,7 @@ def test_legacy_tables(self): ) WITH COMPACT STORAGE AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = '' - AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND dclocal_read_repair_chance = 0.1 AND default_time_to_live = 0 @@ -967,7 +991,7 @@ def test_legacy_tables(self): ) WITH COMPACT STORAGE AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = '' - AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND dclocal_read_repair_chance = 0.1 AND default_time_to_live = 0 @@ -988,7 +1012,7 @@ def test_legacy_tables(self): AND CLUSTERING ORDER BY (column1 ASC) AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = '' - AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} + AND compaction = {'class': 
'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND dclocal_read_repair_chance = 0.1 AND default_time_to_live = 0 @@ -1005,7 +1029,7 @@ def test_legacy_tables(self): ) WITH COMPACT STORAGE AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = '' - AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND dclocal_read_repair_chance = 0.1 AND default_time_to_live = 0 @@ -1025,7 +1049,7 @@ def test_legacy_tables(self): AND CLUSTERING ORDER BY (column1 ASC) AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = '' - AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND dclocal_read_repair_chance = 0.1 AND default_time_to_live = 0 @@ -1052,7 +1076,7 @@ def test_legacy_tables(self): AND CLUSTERING ORDER BY (column1 ASC, column1 ASC, column2 ASC) AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = 'Stores file meta data' - AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'} + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'} AND dclocal_read_repair_chance = 0.1 AND default_time_to_live = 0 From 22b92df9f970784968abdd4eed656a491f443c88 Mon Sep 17 
00:00:00 2001 From: Adam Holmberg Date: Mon, 22 Jun 2015 17:11:43 -0500 Subject: [PATCH 1535/3726] Add protocol_hander_class to allow extension, serdes specialization --- cassandra/cluster.py | 18 ++- cassandra/connection.py | 12 +- cassandra/protocol.py | 190 +++++++++++++++++++------------- docs/api/cassandra/cluster.rst | 2 + docs/api/cassandra/protocol.rst | 9 ++ tests/integration/__init__.py | 2 +- 6 files changed, 151 insertions(+), 82 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 45784c4623..853a8fef85 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -60,7 +60,7 @@ IsBootstrappingErrorMessage, BatchMessage, RESULT_KIND_PREPARED, RESULT_KIND_SET_KEYSPACE, RESULT_KIND_ROWS, - RESULT_KIND_SCHEMA_CHANGE, MIN_SUPPORTED_VERSION) + RESULT_KIND_SCHEMA_CHANGE, MIN_SUPPORTED_VERSION, ProtocolHandler) from cassandra.metadata import Metadata, protect_name, murmur3 from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy, ExponentialReconnectionPolicy, HostDistance, @@ -419,6 +419,15 @@ def auth_provider(self, value): GeventConnection will be used automatically. """ + protocol_handler_class = ProtocolHandler + """ + Specifies a protocol handler class, which can be used to override or extend features + such as message or type deserialization. + + The class must conform to the public classmethod interface defined in the default + implementation, :class:`cassandra.protocol.ProtocolHandler` + """ + control_connection_timeout = 2.0 """ A timeout, in seconds, for queries made by the control connection, such @@ -515,7 +524,8 @@ def __init__(self, idle_heartbeat_interval=30, schema_event_refresh_window=2, topology_event_refresh_window=10, - connect_timeout=5): + connect_timeout=5, + protocol_handler_class=None): """ Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. 
@@ -559,6 +569,9 @@ def __init__(self, if connection_class is not None: self.connection_class = connection_class + if protocol_handler_class is not None: + self.protocol_handler_class = protocol_handler_class + self.metrics_enabled = metrics_enabled self.ssl_options = ssl_options self.sockopts = sockopts @@ -798,6 +811,7 @@ def _make_connection_kwargs(self, address, kwargs_dict): kwargs_dict['cql_version'] = self.cql_version kwargs_dict['protocol_version'] = self.protocol_version kwargs_dict['user_type_map'] = self._user_types + kwargs_dict['protocol_handler_class'] = self.protocol_handler_class return kwargs_dict diff --git a/cassandra/connection.py b/cassandra/connection.py index 20b69c117c..d1e3a9c97a 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -42,7 +42,7 @@ from cassandra.marshal import int32_pack, uint8_unpack from cassandra.protocol import (ReadyMessage, AuthenticateMessage, OptionsMessage, StartupMessage, ErrorMessage, CredentialsMessage, - QueryMessage, ResultMessage, decode_response, + QueryMessage, ResultMessage, ProtocolHandler, InvalidRequestException, SupportedMessage, AuthResponseMessage, AuthChallengeMessage, AuthSuccessMessage, ProtocolException, @@ -209,7 +209,7 @@ class Connection(object): def __init__(self, host='127.0.0.1', port=9042, authenticator=None, ssl_options=None, sockopts=None, compression=True, cql_version=None, protocol_version=MAX_SUPPORTED_VERSION, is_control_connection=False, - user_type_map=None): + user_type_map=None, protocol_handler_class=ProtocolHandler): self.host = host self.port = port self.authenticator = authenticator @@ -220,6 +220,8 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self.protocol_version = protocol_version self.is_control_connection = is_control_connection self.user_type_map = user_type_map + self.decoder = protocol_handler_class.decode_message + self.encoder = protocol_handler_class.encode_message self._push_watchers = defaultdict(set) self._callbacks = {} 
self._iobuf = io.BytesIO() @@ -362,7 +364,7 @@ def send_msg(self, msg, request_id, cb): raise ConnectionShutdown("Connection to %s is closed" % self.host) self._callbacks[request_id] = cb - self.push(msg.to_binary(request_id, self.protocol_version, compression=self.compressor)) + self.push(self.encoder(msg, request_id, self.protocol_version, compressor=self.compressor)) return request_id def wait_for_response(self, msg, timeout=None): @@ -498,8 +500,8 @@ def process_msg(self, header, body): self.msg_received = True try: - response = decode_response(header.version, self.user_type_map, stream_id, - header.flags, header.opcode, body, self.decompressor) + response = self.decoder(header.version, self.user_type_map, stream_id, + header.flags, header.opcode, body, self.decompressor) except Exception as exc: log.exception("Error decoding response from Cassandra. " "opcode: %04x; message contents: %r", header.opcode, body) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 9b544236ec..0aa9ae4846 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -83,29 +83,6 @@ class _MessageType(object): custom_payload = None warnings = None - def to_binary(self, stream_id, protocol_version, compression=None): - flags = 0 - body = io.BytesIO() - if self.custom_payload: - if protocol_version < 4: - raise UnsupportedOperation("Custom key/value payloads can only be used with protocol version 4 or higher") - flags |= CUSTOM_PAYLOAD_FLAG - write_bytesmap(body, self.custom_payload) - self.send_body(body, protocol_version) - body = body.getvalue() - - if compression and len(body) > 0: - body = compression(body) - flags |= COMPRESSED_FLAG - if self.tracing: - flags |= TRACING_FLAG - - msg = io.BytesIO() - write_header(msg, protocol_version, flags, stream_id, self.opcode, len(body)) - msg.write(body) - - return msg.getvalue() - def update_custom_payload(self, other): if other: if not self.custom_payload: @@ -126,50 +103,6 @@ def _get_params(message_obj): ) -def 
decode_response(protocol_version, user_type_map, stream_id, flags, opcode, body, - decompressor=None): - if flags & COMPRESSED_FLAG: - if decompressor is None: - raise Exception("No de-compressor available for compressed frame!") - body = decompressor(body) - flags ^= COMPRESSED_FLAG - - body = io.BytesIO(body) - if flags & TRACING_FLAG: - trace_id = UUID(bytes=body.read(16)) - flags ^= TRACING_FLAG - else: - trace_id = None - - if flags & WARNING_FLAG: - warnings = read_stringlist(body) - flags ^= WARNING_FLAG - else: - warnings = None - - if flags & CUSTOM_PAYLOAD_FLAG: - custom_payload = read_bytesmap(body) - flags ^= CUSTOM_PAYLOAD_FLAG - else: - custom_payload = None - - if flags: - log.warning("Unknown protocol flags set: %02x. May cause problems.", flags) - - msg_class = _message_types_by_opcode[opcode] - msg = msg_class.recv_body(body, protocol_version, user_type_map) - msg.stream_id = stream_id - msg.trace_id = trace_id - msg.custom_payload = custom_payload - msg.warnings = warnings - - if msg.warnings: - for w in msg.warnings: - log.warning("Server warning: %s", w) - - return msg - - error_classes = {} @@ -609,7 +542,7 @@ class ResultMessage(_MessageType): results = None paging_state = None - _type_codes = { + type_codes = { 0x0000: CUSTOM_TYPE, 0x0001: AsciiType, 0x0002: LongType, @@ -744,7 +677,7 @@ def recv_results_schema_change(cls, f, protocol_version): def read_type(cls, f, user_type_map): optid = read_short(f) try: - typeclass = cls._type_codes[optid] + typeclass = cls.type_codes[optid] except KeyError: raise NotSupportedError("Unknown data type code 0x%04x. Have to skip" " entire result set." % (optid,)) @@ -964,13 +897,122 @@ def recv_schema_change(cls, f, protocol_version): return event -def write_header(f, version, flags, stream_id, opcode, length): +class ProtocolHandler(object): + """ + ProtocolHander handles encoding and decoding messages. 
+ + This class can be specialized to compose Handlers which implement alternative + result decoding or type deserialization. Class definitions are passed to :class:`cassandra.cluster.Cluster` + on initialization. + + Contracted class methods are :meth:`ProtocolHandler.encode_message` and :meth:`ProtocolHandler.decode_message`. + """ + + message_types_by_opcode = _message_types_by_opcode.copy() """ - Write a CQL protocol frame header. + Default mapping of opcode to Message implementation. The default ``decode_message`` implementation uses + this to instantiate a message and populate using ``recv_body``. This mapping can be updated to inject specialized + result decoding implementations. """ - pack = v3_header_pack if version >= 3 else header_pack - f.write(pack(version, flags, stream_id, opcode)) - write_int(f, length) + + @classmethod + def encode_message(cls, msg, stream_id, protocol_version, compressor): + """ + Encodes a message using the specified frame parameters, and compressor + + :param msg: the message, typically of cassandra.protocol._MessageType, generated by the driver + :param stream_id: protocol stream id for the frame header + :param protocol_version: version for the frame header, and used encoding contents + :param compressor: optional compression function to be used on the body + :return: + """ + flags = 0 + body = io.BytesIO() + if msg.custom_payload: + if protocol_version < 4: + raise UnsupportedOperation("Custom key/value payloads can only be used with protocol version 4 or higher") + flags |= CUSTOM_PAYLOAD_FLAG + write_bytesmap(body, msg.custom_payload) + msg.send_body(body, protocol_version) + body = body.getvalue() + + if compressor and len(body) > 0: + body = compressor(body) + flags |= COMPRESSED_FLAG + + if msg.tracing: + flags |= TRACING_FLAG + + buff = io.BytesIO() + cls._write_header(buff, protocol_version, flags, stream_id, msg.opcode, len(body)) + buff.write(body) + + return buff.getvalue() + + @staticmethod + def _write_header(f, 
version, flags, stream_id, opcode, length): + """ + Write a CQL protocol frame header. + """ + pack = v3_header_pack if version >= 3 else header_pack + f.write(pack(version, flags, stream_id, opcode)) + write_int(f, length) + + @classmethod + def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcode, body, + decompressor): + """ + Decodes a native protocol message body + + :param protocol_version: version to use decoding contents + :param user_type_map: map[keyspace name] = map[type name] = custom type to instantiate when deserializing this type + :param stream_id: native protocol stream id from the frame header + :param flags: native protocol flags bitmap from the header + :param opcode: native protocol opcode from the header + :param body: frame body + :param decompressor: optional decompression function to inflate the body + :return: a message decoded from the body and frame attributes + """ + if flags & COMPRESSED_FLAG: + if decompressor is None: + raise Exception("No de-compressor available for compressed frame!") + body = decompressor(body) + flags ^= COMPRESSED_FLAG + + body = io.BytesIO(body) + if flags & TRACING_FLAG: + trace_id = UUID(bytes=body.read(16)) + flags ^= TRACING_FLAG + else: + trace_id = None + + if flags & WARNING_FLAG: + warnings = read_stringlist(body) + flags ^= WARNING_FLAG + else: + warnings = None + + if flags & CUSTOM_PAYLOAD_FLAG: + custom_payload = read_bytesmap(body) + flags ^= CUSTOM_PAYLOAD_FLAG + else: + custom_payload = None + + if flags: + log.warning("Unknown protocol flags set: %02x. 
May cause problems.", flags) + + msg_class = cls.message_types_by_opcode[opcode] + msg = msg_class.recv_body(body, protocol_version, user_type_map) + msg.stream_id = stream_id + msg.trace_id = trace_id + msg.custom_payload = custom_payload + msg.warnings = warnings + + if msg.warnings: + for w in msg.warnings: + log.warning("Server warning: %s", w) + + return msg def read_byte(f): diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 3edc22c82a..ad4272c988 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -27,6 +27,8 @@ .. autoattribute:: connection_class + .. autoattribute:: protocol_handler_class + .. autoattribute:: metrics_enabled .. autoattribute:: metrics diff --git a/docs/api/cassandra/protocol.rst b/docs/api/cassandra/protocol.rst index 52f1287a6c..cabf2b59dd 100644 --- a/docs/api/cassandra/protocol.rst +++ b/docs/api/cassandra/protocol.rst @@ -15,3 +15,12 @@ By default these are ignored by the server. They can be useful for servers imple a custom QueryHandler. See :meth:`.Session.execute`, ::meth:`.Session.execute_async`, :attr:`.ResponseFuture.custom_payload`. + +.. autoclass:: ProtocolHandler + + .. autoattribute:: message_types_by_opcode + :annotation: = {default mapping} + + .. automethod:: encode_message + + .. 
automethod:: decode_message diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 3568b1ffc6..057da5c80a 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -80,7 +80,7 @@ def _tuple_version(version_string): USE_CASS_EXTERNAL = bool(os.getenv('USE_CASS_EXTERNAL', False)) -default_cassandra_version = '2.1.5' +default_cassandra_version = '2.1.6' if USE_CASS_EXTERNAL: if CCMClusterFactory: From 0acb459da2f05bc65f9e3243bfaa66ef6681f6a4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 23 Jun 2015 11:36:50 -0500 Subject: [PATCH 1536/3726] Cluster._make_connection_kwargs accept overrides for internal defaults Make CC use protocol.ProtocolHandler for internal connections, to avoid specialized implementations. --- cassandra/cluster.py | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 853a8fef85..c26e10f1a5 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -60,7 +60,8 @@ IsBootstrappingErrorMessage, BatchMessage, RESULT_KIND_PREPARED, RESULT_KIND_SET_KEYSPACE, RESULT_KIND_ROWS, - RESULT_KIND_SCHEMA_CHANGE, MIN_SUPPORTED_VERSION, ProtocolHandler) + RESULT_KIND_SCHEMA_CHANGE, MIN_SUPPORTED_VERSION, + ProtocolHandler) from cassandra.metadata import Metadata, protect_name, murmur3 from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy, ExponentialReconnectionPolicy, HostDistance, @@ -802,16 +803,16 @@ def _make_connection_factory(self, host, *args, **kwargs): def _make_connection_kwargs(self, address, kwargs_dict): if self._auth_provider_callable: - kwargs_dict['authenticator'] = self._auth_provider_callable(address) + kwargs_dict.setdefault('authenticator', self._auth_provider_callable(address)) - kwargs_dict['port'] = self.port - kwargs_dict['compression'] = self.compression - kwargs_dict['sockopts'] = self.sockopts - kwargs_dict['ssl_options'] = self.ssl_options - 
kwargs_dict['cql_version'] = self.cql_version - kwargs_dict['protocol_version'] = self.protocol_version - kwargs_dict['user_type_map'] = self._user_types - kwargs_dict['protocol_handler_class'] = self.protocol_handler_class + kwargs_dict.setdefault('port', self.port) + kwargs_dict.setdefault('compression', self.compression) + kwargs_dict.setdefault('sockopts', self.sockopts) + kwargs_dict.setdefault('ssl_options', self.ssl_options) + kwargs_dict.setdefault('cql_version', self.cql_version) + kwargs_dict.setdefault('protocol_version', self.protocol_version) + kwargs_dict.setdefault('user_type_map', self._user_types) + kwargs_dict.setdefault('protocol_handler_class', self.protocol_handler_class) return kwargs_dict @@ -1371,7 +1372,7 @@ def _prepare_all_queries(self, host): log.debug("Preparing all known prepared statements against host %s", host) connection = None try: - connection = self.connection_factory(host.address) + connection = self.connection_factory(host.address, protocol_handler_class=ProtocolHandler) try: self.control_connection.wait_for_schema_agreement(connection) except Exception: @@ -2130,7 +2131,7 @@ def _try_connect(self, host): while True: try: - connection = self._cluster.connection_factory(host.address, is_control_connection=True) + connection = self._cluster.connection_factory(host.address, is_control_connection=True, protocol_handler_class=ProtocolHandler) break except ProtocolVersionUnsupported as e: self._cluster.protocol_downgrade(host.address, e.startup_version) From 59d4612aaccfbc56e530aff6ad4ef191c8bc0c94 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 25 Jun 2015 08:49:47 -0500 Subject: [PATCH 1537/3726] cqle: Update invalid inet test with a string that won't parse Cassandra seems to be accepting various bad strings and inserting '92.242.140.2' (for many different strings). Need to follow up on that. Meanwhile, this string does induce an Invalid request. 
Side note: I'm not sure this test is worth having, unless there is supposed to be some kind of local validation. We're not testing the server here. --- tests/integration/cqlengine/columns/test_validation.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index ce8ab987c5..8732375446 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -392,6 +392,7 @@ def test_inet_saves(self): assert m.address == "192.168.1.1" def test_non_address_fails(self): + # TODO: presently this only tests that the server blows it up. Is there supposed to be local validation? with self.assertRaises(InvalidRequest): - self.InetTestModel.create(address="ham sandwich") + self.InetTestModel.create(address="what is going on here?") From 08f768aa2230c50c546dcb877fce305740bdfe00 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 25 Jun 2015 08:52:41 -0500 Subject: [PATCH 1538/3726] Changelog and release version update. 
--- CHANGELOG.rst | 21 ++++++++++++++++++++- cassandra/__init__.py | 2 +- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 41b79b7e54..a4111bd507 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,5 +1,24 @@ +2.6.0c2 +======= +June 24, 2015 + +Features +-------- +* Automatic Protocol Version Downgrade (PYTHON-240) +* cqlengine Python 2.6 compatibility (PYTHON-288) +* Double-dollar string quote UDF body (PYTHON-345) +* Set models.DEFAULT_KEYSPACE when calling set_session (github #352) + +Bug Fixes +--------- +* Avoid stall while connecting to mixed version cluster (PYTHON-303) +* Make SSL work with AsyncoreConnection in python 2.6.9 (PYTHON-322) +* Fix Murmur3Token.from_key() on Windows (PYTHON-331) +* Fix cqlengine TimeUUID rounding error for Windows (PYTHON-341) +* Avoid invalid compaction options in CQL export for non-SizeTiered (PYTHON-352) + 2.6.0c1 -===== +======= June 4, 2015 This release adds support for Cassandra 2.2 features, including version diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 059aeb00eb..258250cf83 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (2, 6, '0c1', 'post') +__version_info__ = (2, 6, '0c2') __version__ = '.'.join(map(str, __version_info__)) From 1d9df6cafe2e0777647d76ca2d971234c37537d6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 25 Jun 2015 09:01:25 -0500 Subject: [PATCH 1539/3726] Post release version update also removing travis status widget until the install/setup can be stabilized --- README.rst | 3 --- cassandra/__init__.py | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/README.rst b/README.rst index bbb63a323b..a38b771303 100644 --- a/README.rst +++ b/README.rst @@ -1,9 +1,6 @@ DataStax Python Driver for Apache Cassandra =========================================== -.. 
image:: https://travis-ci.org/datastax/python-driver.png?branch=master - :target: https://travis-ci.org/datastax/python-driver - A modern, `feature-rich `_ and highly-tunable Python client library for Apache Cassandra (1.2+) and DataStax Enterprise (3.1+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3. The driver supports Python 2.6, 2.7, 3.3, and 3.4*. diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 258250cf83..23d4623d24 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (2, 6, '0c2') +__version_info__ = (2, 6, '0c2', 'post') __version__ = '.'.join(map(str, __version_info__)) From 0fca6e59a335bad455282f6661353348c9731b6f Mon Sep 17 00:00:00 2001 From: Tyler Hobbs Date: Tue, 30 Jun 2015 11:53:26 -0500 Subject: [PATCH 1540/3726] Multi-col COMPACT tables are incompatible if there are clustering cols If there are not any clustering columns, a COMPACT STORAGE table can have any number of non-PRIMARY KEY columns and still be CQL-compatible. 
See: https://issues.apache.org/jira/browse/CASSANDRA-9647 Done for https://datastax-oss.atlassian.net/browse/PYTHON-360 --- cassandra/metadata.py | 8 +++++-- tests/integration/standard/test_metadata.py | 25 ++++++++++++++++++++- 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 1ae5ea11ac..9668a8c2c4 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1227,8 +1227,12 @@ def is_cql_compatible(self): """ # no such thing as DCT in CQL incompatible = issubclass(self.comparator, types.DynamicCompositeType) - # no compact storage with more than one column beyond PK - incompatible |= self.is_compact_storage and len(self.columns) > len(self.primary_key) + 1 + + # no compact storage with more than one column beyond PK if there + # are clustering columns + incompatible |= (self.is_compact_storage and + len(self.columns) > len(self.primary_key) + 1 and + len(self.clustering_key) >= 1) return not incompatible diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 611dce2307..1d7f68df57 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -17,7 +17,9 @@ except ImportError: import unittest # noqa -import difflib, six, sys +import difflib +import six +import sys from mock import Mock from cassandra import AlreadyExists, SignatureDescriptor, UserFunctionDescriptor, UserAggregateDescriptor @@ -231,6 +233,26 @@ def test_composite_in_compound_primary_key_compact(self): self.check_create_statement(tablemeta, create_statement) + def test_cql_compatibility(self): + # having more than one non-PK column is okay if there aren't any + # clustering columns + create_statement = self.make_create_statement(["a"], [], ["b", "c", "d"], compact=True) + self.session.execute(create_statement) + tablemeta = self.get_table_metadata() + + self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key]) + 
self.assertEqual([], tablemeta.clustering_key) + self.assertEqual([u'a', u'b', u'c', u'd'], sorted(tablemeta.columns.keys())) + + self.assertTrue(tablemeta.is_cql_compatible) + + # ... but if there are clustering columns, it's not CQL compatible. + # This is a hacky way to simulate having clustering columns. + tablemeta.clustering_key = ["foo", "bar"] + tablemeta.columns["foo"] = None + tablemeta.columns["bar"] = None + self.assertFalse(tablemeta.is_cql_compatible) + def test_compound_primary_keys_ordering(self): create_statement = self.make_create_statement(["a"], ["b"], ["c"]) create_statement += " WITH CLUSTERING ORDER BY (b DESC)" @@ -592,6 +614,7 @@ def test_refresh_user_aggregate_metadata(self): cluster2.shutdown() + class TestCodeCoverage(unittest.TestCase): def test_export_schema(self): From 0a1e472ec45b66bb6309ca753643dd192911a42f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 29 Jun 2015 15:31:28 -0500 Subject: [PATCH 1541/3726] Connection server_version property Lazily queried, set preemptively whenever a topo query is performed. 
--- cassandra/cluster.py | 5 +++-- cassandra/connection.py | 13 +++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 828fa8266b..5996a70d69 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1981,7 +1981,6 @@ def __init__(self, control_connection, *args, **kwargs): self.control_connection = weakref.proxy(control_connection) def try_reconnect(self): - # we'll either get back a new Connection or a NoHostAvailable return self.control_connection._reconnect_internal() def on_reconnection(self, connection): @@ -2032,7 +2031,7 @@ class ControlConnection(object): _SELECT_TRIGGERS = "SELECT * FROM system.schema_triggers" _SELECT_PEERS = "SELECT peer, data_center, rack, tokens, rpc_address, schema_version FROM system.peers" - _SELECT_LOCAL = "SELECT cluster_name, data_center, rack, tokens, partitioner, schema_version FROM system.local WHERE key='local'" + _SELECT_LOCAL = "SELECT cluster_name, data_center, rack, tokens, partitioner, release_version, schema_version FROM system.local WHERE key='local'" _SELECT_SCHEMA_PEERS = "SELECT peer, rpc_address, schema_version FROM system.peers" _SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'" @@ -2448,6 +2447,8 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, if partitioner and tokens: token_map[host] = tokens + connection.server_version = local_row['release_version'] + # Check metadata.partitioner to see if we haven't built anything yet. If # every node in the cluster was in the contact points, we won't discover # any new nodes, so we need this additional check. 
(See PYTHON-90) diff --git a/cassandra/connection.py b/cassandra/connection.py index 464ec792ba..05fc68fa54 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -196,6 +196,7 @@ class Connection(object): is_unsupported_proto_version = False is_control_connection = False + _server_version = None _iobuf = None _current_frame = None @@ -737,6 +738,18 @@ def is_idle(self): def reset_idle(self): self.msg_received = False + @property + def server_version(self): + if self._server_version is None: + query_message = QueryMessage(query="SELECT release_version FROM system.local", consistency_level=ConsistencyLevel.ONE) + message = self.wait_for_response(query_message) + self._server_version = message.results[1][0][0] # (col names, rows)[rows][first row][only item] + return self._server_version + + @server_version.setter + def server_version(self, version): + self._server_version = version + def __str__(self): status = "" if self.is_defunct: From 28f8887b992183f347bea76b3167cb8a485cca47 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Jul 2015 10:58:22 -0500 Subject: [PATCH 1542/3726] Refactor schema query/build into cassandra.metadata Changes in anticipation of supporting modernized schema tables (CASSANDRA-6717) - schema change response/events normalized according to protocol spec keywords -- events propagate with intrinsic keywords - Parsing is defined by connection server version - option to specialize query/parsing by earlier server versions (not implemented -- currently just refactored current conditional logic) --- cassandra/__init__.py | 14 + cassandra/cluster.py | 243 +---- cassandra/metadata.py | 1076 ++++++++++++------- cassandra/protocol.py | 16 +- tests/integration/standard/test_metadata.py | 2 +- tests/unit/test_control_connection.py | 30 +- tests/unit/test_metadata.py | 10 +- tests/unit/test_response_future.py | 8 +- 8 files changed, 764 insertions(+), 635 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 
23d4623d24..ad073f7ccc 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -126,6 +126,20 @@ def consistency_value_to_name(value): return ConsistencyLevel.value_to_name[value] if value is not None else "Not Set" +class SchemaChangeType(object): + DROPPED = 'DROPPED' + CREATED = 'CREATED' + UPDATED = 'UPDATED' + + +class SchemaTargetType(object): + KEYSPACE = 'KEYSPACE' + TABLE = 'TABLE' + TYPE = 'TYPE' + FUNCTION = 'FUNCTION' + AGGREGATE = 'AGGREGATE' + + class SignatureDescriptor(object): def __init__(self, name, type_signature): diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 5996a70d69..28d808eeca 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -44,8 +44,8 @@ from itertools import groupby from cassandra import (ConsistencyLevel, AuthenticationFailed, - InvalidRequest, OperationTimedOut, - UnsupportedOperation, Unauthorized) + OperationTimedOut, UnsupportedOperation, + SchemaTargetType) from cassandra.connection import (ConnectionException, ConnectionShutdown, ConnectionHeartbeat, ProtocolVersionUnsupported) from cassandra.cqltypes import UserType @@ -79,6 +79,7 @@ def _is_eventlet_monkey_patched(): import eventlet.patcher return eventlet.patcher.is_monkey_patched('socket') + def _is_gevent_monkey_patched(): if 'gevent.monkey' not in sys.modules: return False @@ -1201,13 +1202,28 @@ def _ensure_core_connections(self): for pool in session._pools.values(): pool.ensure_core_connections() - def _validate_refresh_schema(self, keyspace, table, usertype, function, aggregate): + @staticmethod + def _validate_refresh_schema(keyspace, table, usertype, function, aggregate): if any((table, usertype, function, aggregate)): if not keyspace: raise ValueError("keyspace is required to refresh specific sub-entity {table, usertype, function, aggregate}") if sum(1 for e in (table, usertype, function) if e) > 1: raise ValueError("{table, usertype, function, aggregate} are mutually exclusive") + @staticmethod + def 
_target_type_from_refresh_args(keyspace, table, usertype, function, aggregate): + if aggregate: + return SchemaTargetType.AGGREGATE + elif function: + return SchemaTargetType.FUNCTION + elif usertype: + return SchemaTargetType.TYPE + elif table: + return SchemaTargetType.TABLE + elif keyspace: + return SchemaTargetType.KEYSPACE + return None + def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None, aggregate=None, max_schema_agreement_wait=None): """ .. deprecated:: 2.6.0 @@ -1242,8 +1258,10 @@ def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None log.warning(msg) self._validate_refresh_schema(keyspace, table, usertype, function, aggregate) - if not self.control_connection.refresh_schema(keyspace, table, usertype, function, - aggregate, max_schema_agreement_wait): + target_type = self._target_type_from_refresh_args(keyspace, table, usertype, function, aggregate) + if not self.control_connection.refresh_schema(target_type=target_type, keyspace=keyspace, table=table, + type=usertype, function=function, aggregate=aggregate, + schema_agreement_wait=max_schema_agreement_wait): raise Exception("Schema was not refreshed. 
See log for details.") def submit_schema_refresh(self, keyspace=None, table=None, usertype=None, function=None, aggregate=None): @@ -1259,8 +1277,10 @@ def submit_schema_refresh(self, keyspace=None, table=None, usertype=None, functi log.warning(msg) self._validate_refresh_schema(keyspace, table, usertype, function, aggregate) + target_type = self._target_type_from_refresh_args(keyspace, table, usertype, function, aggregate) return self.executor.submit( - self.control_connection.refresh_schema, keyspace, table, usertype, function, aggregate) + self.control_connection.refresh_schema, target_type=target_type, keyspace=keyspace, table=table, + type=usertype, function=function, aggregate=aggregate) def refresh_schema_metadata(self, max_schema_agreement_wait=None): """ @@ -1285,7 +1305,8 @@ def refresh_keyspace_metadata(self, keyspace, max_schema_agreement_wait=None): See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ - if not self.control_connection.refresh_schema(keyspace, schema_agreement_wait=max_schema_agreement_wait): + if not self.control_connection.refresh_schema(target_type=SchemaTargetType.KEYSPACE, keyspace=keyspace, + schema_agreement_wait=max_schema_agreement_wait): raise Exception("Keyspace metadata was not refreshed. See log for details.") def refresh_table_metadata(self, keyspace, table, max_schema_agreement_wait=None): @@ -1295,7 +1316,7 @@ def refresh_table_metadata(self, keyspace, table, max_schema_agreement_wait=None See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ - if not self.control_connection.refresh_schema(keyspace, table, schema_agreement_wait=max_schema_agreement_wait): + if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=table, schema_agreement_wait=max_schema_agreement_wait): raise Exception("Table metadata was not refreshed. 
See log for details.") def refresh_user_type_metadata(self, keyspace, user_type, max_schema_agreement_wait=None): @@ -1304,7 +1325,7 @@ def refresh_user_type_metadata(self, keyspace, user_type, max_schema_agreement_w See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ - if not self.control_connection.refresh_schema(keyspace, usertype=user_type, schema_agreement_wait=max_schema_agreement_wait): + if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TYPE, keyspace=keyspace, type=user_type, schema_agreement_wait=max_schema_agreement_wait): raise Exception("User Type metadata was not refreshed. See log for details.") def refresh_user_function_metadata(self, keyspace, function, max_schema_agreement_wait=None): @@ -1315,7 +1336,7 @@ def refresh_user_function_metadata(self, keyspace, function, max_schema_agreemen See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ - if not self.control_connection.refresh_schema(keyspace, function=function, schema_agreement_wait=max_schema_agreement_wait): + if not self.control_connection.refresh_schema(target_type=SchemaTargetType.FUNCTION, keyspace=keyspace, function=function, schema_agreement_wait=max_schema_agreement_wait): raise Exception("User Function metadata was not refreshed. 
See log for details.") def refresh_user_aggregate_metadata(self, keyspace, aggregate, max_schema_agreement_wait=None): @@ -1326,7 +1347,7 @@ def refresh_user_aggregate_metadata(self, keyspace, aggregate, max_schema_agreem See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior """ - if not self.control_connection.refresh_schema(keyspace, aggregate=aggregate, schema_agreement_wait=max_schema_agreement_wait): + if not self.control_connection.refresh_schema(target_type=SchemaTargetType.AGGREGATE, keyspace=keyspace, aggregate=aggregate, schema_agreement_wait=max_schema_agreement_wait): raise Exception("User Aggregate metadata was not refreshed. See log for details.") def refresh_nodes(self): @@ -2022,14 +2043,6 @@ class ControlConnection(object): Internal """ - _SELECT_KEYSPACES = "SELECT * FROM system.schema_keyspaces" - _SELECT_COLUMN_FAMILIES = "SELECT * FROM system.schema_columnfamilies" - _SELECT_COLUMNS = "SELECT * FROM system.schema_columns" - _SELECT_USERTYPES = "SELECT * FROM system.schema_usertypes" - _SELECT_FUNCTIONS = "SELECT * FROM system.schema_functions" - _SELECT_AGGREGATES = "SELECT * FROM system.schema_aggregates" - _SELECT_TRIGGERS = "SELECT * FROM system.schema_triggers" - _SELECT_PEERS = "SELECT peer, data_center, rack, tokens, rpc_address, schema_version FROM system.peers" _SELECT_LOCAL = "SELECT cluster_name, data_center, rack, tokens, partitioner, release_version, schema_version FROM system.local WHERE key='local'" @@ -2219,16 +2232,14 @@ def shutdown(self): self._connection.close() del self._connection - def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None, - aggregate=None, schema_agreement_wait=None): + def refresh_schema(self, **kwargs): if not self._meta_refresh_enabled: log.debug("[control connection] Skipping schema refresh because meta refresh is disabled") return False try: if self._connection: - return self._refresh_schema(self._connection, keyspace, table, 
usertype, function, - aggregate, schema_agreement_wait=schema_agreement_wait) + return self._refresh_schema(self._connection, **kwargs) except ReferenceError: pass # our weak reference to the Cluster is no good except Exception: @@ -2236,13 +2247,10 @@ def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None self._signal_error() return False - def _refresh_schema(self, connection, keyspace=None, table=None, usertype=None, function=None, - aggregate=None, preloaded_results=None, schema_agreement_wait=None): + def _refresh_schema(self, connection, preloaded_results=None, schema_agreement_wait=None, **kwargs): if self._cluster.is_shutdown: return False - assert sum(1 for arg in (table, usertype, function, aggregate) if arg) <= 1 - agreed = self.wait_for_schema_agreement(connection, preloaded_results=preloaded_results, wait_time=schema_agreement_wait) @@ -2250,148 +2258,8 @@ def _refresh_schema(self, connection, keyspace=None, table=None, usertype=None, log.debug("Skipping schema refresh due to lack of schema agreement") return False - cl = ConsistencyLevel.ONE - if table: - def _handle_results(success, result): - if success: - return dict_factory(*result.results) if result else {} - else: - raise result - - # a particular table changed - where_clause = " WHERE keyspace_name = '%s' AND columnfamily_name = '%s'" % (keyspace, table) - cf_query = QueryMessage(query=self._SELECT_COLUMN_FAMILIES + where_clause, consistency_level=cl) - col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl) - triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl) - (cf_success, cf_result), (col_success, col_result), (triggers_success, triggers_result) \ - = connection.wait_for_responses(cf_query, col_query, triggers_query, timeout=self._timeout, fail_on_error=False) - - log.debug("[control connection] Fetched table info for %s.%s, rebuilding metadata", keyspace, table) - cf_result = 
_handle_results(cf_success, cf_result) - col_result = _handle_results(col_success, col_result) - - # handle the triggers table not existing in Cassandra 1.2 - if not triggers_success and isinstance(triggers_result, InvalidRequest): - triggers_result = {} - else: - triggers_result = _handle_results(triggers_success, triggers_result) - - self._cluster.metadata.table_changed(keyspace, table, cf_result, col_result, triggers_result) - elif usertype: - # user defined types within this keyspace changed - where_clause = " WHERE keyspace_name = '%s' AND type_name = '%s'" % (keyspace, usertype) - types_query = QueryMessage(query=self._SELECT_USERTYPES + where_clause, consistency_level=cl) - types_result = connection.wait_for_response(types_query) - log.debug("[control connection] Fetched user type info for %s.%s, rebuilding metadata", keyspace, usertype) - types_result = dict_factory(*types_result.results) if types_result.results else {} - self._cluster.metadata.usertype_changed(keyspace, usertype, types_result) - elif function: - # user defined function within this keyspace changed - where_clause = " WHERE keyspace_name = '%s' AND function_name = '%s' AND signature = [%s]" \ - % (keyspace, function.name, ','.join("'%s'" % t for t in function.type_signature)) - functions_query = QueryMessage(query=self._SELECT_FUNCTIONS + where_clause, consistency_level=cl) - functions_result = connection.wait_for_response(functions_query) - log.debug("[control connection] Fetched user function info for %s.%s, rebuilding metadata", keyspace, function.signature) - functions_result = dict_factory(*functions_result.results) if functions_result.results else {} - self._cluster.metadata.function_changed(keyspace, function, functions_result) - elif aggregate: - # user defined aggregate within this keyspace changed - where_clause = " WHERE keyspace_name = '%s' AND aggregate_name = '%s' AND signature = [%s]" \ - % (keyspace, aggregate.name, ','.join("'%s'" % t for t in aggregate.type_signature)) - 
aggregates_query = QueryMessage(query=self._SELECT_AGGREGATES + where_clause, consistency_level=cl) - aggregates_result = connection.wait_for_response(aggregates_query) - log.debug("[control connection] Fetched user aggregate info for %s.%s, rebuilding metadata", keyspace, aggregate.signature) - aggregates_result = dict_factory(*aggregates_result.results) if aggregates_result.results else {} - self._cluster.metadata.aggregate_changed(keyspace, aggregate, aggregates_result) - elif keyspace: - # only the keyspace itself changed (such as replication settings) - where_clause = " WHERE keyspace_name = '%s'" % (keyspace,) - ks_query = QueryMessage(query=self._SELECT_KEYSPACES + where_clause, consistency_level=cl) - ks_result = connection.wait_for_response(ks_query) - log.debug("[control connection] Fetched keyspace info for %s, rebuilding metadata", keyspace) - ks_result = dict_factory(*ks_result.results) if ks_result.results else {} - self._cluster.metadata.keyspace_changed(keyspace, ks_result) - else: - # build everything from scratch - queries = [ - QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl), - QueryMessage(query=self._SELECT_COLUMN_FAMILIES, consistency_level=cl), - QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl), - QueryMessage(query=self._SELECT_USERTYPES, consistency_level=cl), - QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl), - QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl), - QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl) - ] - - responses = connection.wait_for_responses(*queries, timeout=self._timeout, fail_on_error=False) - (ks_success, ks_result), (cf_success, cf_result), \ - (col_success, col_result), (types_success, types_result), \ - (functions_success, functions_result), \ - (aggregates_success, aggregates_result), \ - (trigger_success, triggers_result) = responses - - if ks_success: - ks_result = dict_factory(*ks_result.results) - else: - raise ks_result - - if 
cf_success: - cf_result = dict_factory(*cf_result.results) - else: - raise cf_result - - if col_success: - col_result = dict_factory(*col_result.results) - else: - raise col_result - - # if we're connected to Cassandra < 2.0, the trigges table will not exist - if trigger_success: - triggers_result = dict_factory(*triggers_result.results) - else: - if isinstance(triggers_result, InvalidRequest): - log.debug("[control connection] triggers table not found") - triggers_result = {} - elif isinstance(triggers_result, Unauthorized): - log.warning("[control connection] this version of Cassandra does not allow access to schema_triggers metadata with authorization enabled (CASSANDRA-7967); " - "The driver will operate normally, but will not reflect triggers in the local metadata model, or schema strings.") - triggers_result = {} - else: - raise triggers_result - - # if we're connected to Cassandra < 2.1, the usertypes table will not exist - if types_success: - types_result = dict_factory(*types_result.results) if types_result.results else {} - else: - if isinstance(types_result, InvalidRequest): - log.debug("[control connection] user types table not found") - types_result = {} - else: - raise types_result - - # functions were introduced in Cassandra 2.2 - if functions_success: - functions_result = dict_factory(*functions_result.results) if functions_result.results else {} - else: - if isinstance(functions_result, InvalidRequest): - log.debug("[control connection] user functions table not found") - functions_result = {} - else: - raise functions_result - - # aggregates were introduced in Cassandra 2.2 - if aggregates_success: - aggregates_result = dict_factory(*aggregates_result.results) if aggregates_result.results else {} - else: - if isinstance(aggregates_result, InvalidRequest): - log.debug("[control connection] user aggregates table not found") - aggregates_result = {} - else: - raise aggregates_result + self._cluster.metadata.refresh(connection, self._timeout, **kwargs) 
- log.debug("[control connection] Fetched schema, rebuilding metadata") - self._cluster.metadata.rebuild_schema(ks_result, types_result, functions_result, - aggregates_result, cf_result, col_result, triggers_result) return True def refresh_node_list_and_token_map(self, force_token_rebuild=False): @@ -2538,13 +2406,8 @@ def _handle_status_change(self, event): def _handle_schema_change(self, event): if self._schema_event_refresh_window < 0: return - keyspace = event.get('keyspace') - table = event.get('table') - usertype = event.get('type') - function = event.get('function') - aggregate = event.get('aggregate') delay = random() * self._schema_event_refresh_window - self._cluster.scheduler.schedule_unique(delay, self.refresh_schema, keyspace, table, usertype, function, aggregate) + self._cluster.scheduler.schedule_unique(delay, self.refresh_schema, **event) def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wait_time=None): @@ -2727,11 +2590,11 @@ def shutdown(self): self.is_shutdown = True self._queue.put_nowait((0, None)) - def schedule(self, delay, fn, *args): - self._insert_task(delay, (fn, args)) + def schedule(self, delay, fn, *args, **kwargs): + self._insert_task(delay, (fn, args, tuple(kwargs.items()))) - def schedule_unique(self, delay, fn, *args): - task = (fn, args) + def schedule_unique(self, delay, fn, *args, **kwargs): + task = (fn, args, tuple(kwargs.items())) if task not in self._scheduled_tasks: self._insert_task(delay, task) else: @@ -2758,8 +2621,9 @@ def run(self): return if run_at <= time.time(): self._scheduled_tasks.remove(task) - fn, args = task - future = self._executor.submit(fn, *args) + fn, args, kwargs = task + kwargs = dict(kwargs) + future = self._executor.submit(fn, *args, **kwargs) future.add_done_callback(self._log_if_failed) else: self._queue.put_nowait((run_at, task)) @@ -2777,20 +2641,18 @@ def _log_if_failed(self, future): exc_info=exc) -def refresh_schema_and_set_result(keyspace, table, usertype, 
function, aggregate, control_conn, response_future): +def refresh_schema_and_set_result(control_conn, response_future, **kwargs): try: if control_conn._meta_refresh_enabled: log.debug("Refreshing schema in response to schema change. " - "Keyspace: %s; Table: %s, Type: %s, Function: %s, Aggregate: %s", - keyspace, table, usertype, function, aggregate) - control_conn._refresh_schema(response_future._connection, keyspace, table, usertype, function, aggregate) + "%s", kwargs) + control_conn._refresh_schema(response_future._connection, **kwargs) else: log.debug("Skipping schema refresh in response to schema change because meta refresh is disabled; " - "Keyspace: %s; Table: %s, Type: %s, Function: %s", keyspace, table, usertype, function, aggregate) + "%s", kwargs) except Exception: log.exception("Exception refreshing schema in response to schema change:") - response_future.session.submit( - control_conn.refresh_schema, keyspace, table, usertype, function, aggregate) + response_future.session.submit(control_conn.refresh_schema, **kwargs) finally: response_future._set_final_result(None) @@ -3015,13 +2877,8 @@ def _set_result(self, response): # thread instead of the event loop thread self.session.submit( refresh_schema_and_set_result, - response.results['keyspace'], - response.results.get('table'), - response.results.get('type'), - response.results.get('function'), - response.results.get('aggregate'), self.session.cluster.control_connection, - self) + self, **response.results) else: results = getattr(response, 'results', None) if results is not None and response.kind == RESULT_KIND_ROWS: diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 9668a8c2c4..73c7687c6c 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -29,10 +29,12 @@ except ImportError as e: pass -from cassandra import SignatureDescriptor +from cassandra import SignatureDescriptor, ConsistencyLevel, InvalidRequest, SchemaChangeType, Unauthorized import cassandra.cqltypes as types from 
cassandra.encoder import Encoder from cassandra.marshal import varint_unpack +from cassandra.protocol import QueryMessage +from cassandra.query import dict_factory from cassandra.util import OrderedDict log = logging.getLogger(__name__) @@ -110,64 +112,38 @@ def export_schema_as_string(self): """ return "\n".join(ks.export_as_string() for ks in self.keyspaces.values()) - def rebuild_schema(self, ks_results, type_results, function_results, - aggregate_results, cf_results, col_results, triggers_result): - """ - Rebuild the view of the current schema from a fresh set of rows from - the system schema tables. - - For internal use only. - """ - cf_def_rows = defaultdict(list) - col_def_rows = defaultdict(lambda: defaultdict(list)) - usertype_rows = defaultdict(list) - fn_rows = defaultdict(list) - agg_rows = defaultdict(list) - trigger_rows = defaultdict(lambda: defaultdict(list)) - - for row in cf_results: - cf_def_rows[row["keyspace_name"]].append(row) - - for row in col_results: - ksname = row["keyspace_name"] - cfname = row["columnfamily_name"] - col_def_rows[ksname][cfname].append(row) + def refresh(self, connection, timeout, target_type=None, change_type=None, **kwargs): - for row in type_results: - usertype_rows[row["keyspace_name"]].append(row) - - for row in function_results: - fn_rows[row["keyspace_name"]].append(row) + if not target_type: + self._rebuild_all(connection, timeout) + return - for row in aggregate_results: - agg_rows[row["keyspace_name"]].append(row) + tt_lower = target_type.lower() + try: + if change_type == SchemaChangeType.DROPPED: + drop_method = getattr(self, '_drop_' + tt_lower) + drop_method(**kwargs) + else: + parser = get_schema_parser(connection, timeout) + parse_method = getattr(parser, 'get_' + tt_lower) + meta = parse_method(**kwargs) + if meta: + update_method = getattr(self, '_update_' + tt_lower) + update_method(meta) + else: + drop_method = getattr(self, '_drop_' + tt_lower) + drop_method(**kwargs) + except AttributeError: + raise 
ValueError("Unknown schema target_type: '%s'" % target_type) - for row in triggers_result: - ksname = row["keyspace_name"] - cfname = row["columnfamily_name"] - trigger_rows[ksname][cfname].append(row) + def _rebuild_all(self, connection, timeout): + """ + For internal use only. + """ + parser = get_schema_parser(connection, timeout) current_keyspaces = set() - for row in ks_results: - keyspace_meta = self._build_keyspace_metadata(row) - keyspace_col_rows = col_def_rows.get(keyspace_meta.name, {}) - keyspace_trigger_rows = trigger_rows.get(keyspace_meta.name, {}) - for table_row in cf_def_rows.get(keyspace_meta.name, []): - table_meta = self._build_table_metadata(keyspace_meta, table_row, keyspace_col_rows, keyspace_trigger_rows) - keyspace_meta._add_table_metadata(table_meta) - - for usertype_row in usertype_rows.get(keyspace_meta.name, []): - usertype = self._build_usertype(keyspace_meta.name, usertype_row) - keyspace_meta.user_types[usertype.name] = usertype - - for fn_row in fn_rows.get(keyspace_meta.name, []): - fn = self._build_function(keyspace_meta.name, fn_row) - keyspace_meta.functions[fn.signature] = fn - - for agg_row in agg_rows.get(keyspace_meta.name, []): - agg = self._build_aggregate(keyspace_meta.name, agg_row) - keyspace_meta.aggregates[agg.signature] = agg - + for keyspace_meta in parser.get_all_keyspaces(): current_keyspaces.add(keyspace_meta.name) old_keyspace_meta = self.keyspaces.get(keyspace_meta.name, None) self.keyspaces[keyspace_meta.name] = keyspace_meta @@ -184,16 +160,10 @@ def rebuild_schema(self, ks_results, type_results, function_results, for ksname in removed_keyspaces: self._keyspace_removed(ksname) - def keyspace_changed(self, keyspace, ks_results): - if not ks_results: - if keyspace in self.keyspaces: - del self.keyspaces[keyspace] - self._keyspace_removed(keyspace) - return - - keyspace_meta = self._build_keyspace_metadata(ks_results[0]) - old_keyspace_meta = self.keyspaces.get(keyspace, None) - self.keyspaces[keyspace] = 
keyspace_meta + def _update_keyspace(self, keyspace_meta): + ks_name = keyspace_meta.name + old_keyspace_meta = self.keyspaces.get(ks_name, None) + self.keyspaces[ks_name] = keyspace_meta if old_keyspace_meta: keyspace_meta.tables = old_keyspace_meta.tables keyspace_meta.user_types = old_keyspace_meta.user_types @@ -201,50 +171,69 @@ def keyspace_changed(self, keyspace, ks_results): keyspace_meta.functions = old_keyspace_meta.functions keyspace_meta.aggregates = old_keyspace_meta.aggregates if (keyspace_meta.replication_strategy != old_keyspace_meta.replication_strategy): - self._keyspace_updated(keyspace) - else: - self._keyspace_added(keyspace) - - def usertype_changed(self, keyspace, name, type_results): - if type_results: - new_usertype = self._build_usertype(keyspace, type_results[0]) - self.keyspaces[keyspace].user_types[name] = new_usertype + self._keyspace_updated(ks_name) else: - # the type was deleted - self.keyspaces[keyspace].user_types.pop(name, None) + self._keyspace_added(ks_name) - def function_changed(self, keyspace, function, function_results): - if function_results: - new_function = self._build_function(keyspace, function_results[0]) - self.keyspaces[keyspace].functions[function.signature] = new_function - else: - # the function was deleted - self.keyspaces[keyspace].functions.pop(function.signature, None) + def _drop_keyspace(self, keyspace): + if self.keyspaces.pop(keyspace, None): + self._keyspace_removed(keyspace) - def aggregate_changed(self, keyspace, aggregate, aggregate_results): - if aggregate_results: - new_aggregate = self._build_aggregate(keyspace, aggregate_results[0]) - self.keyspaces[keyspace].aggregates[aggregate.signature] = new_aggregate - else: - # the aggregate was deleted - self.keyspaces[keyspace].aggregates.pop(aggregate.signature, None) + def _update_table(self, table_meta): + try: + keyspace_meta = self.keyspaces[table_meta.keyspace_name] + table_meta.keyspace = keyspace_meta # temporary while TableMetadata.keyspace is 
deprecated + keyspace_meta._add_table_metadata(table_meta) + except KeyError: + # can happen if keyspace disappears while processing async event + pass - def table_changed(self, keyspace, table, cf_results, col_results, triggers_result): + def _drop_table(self, keyspace, table): try: keyspace_meta = self.keyspaces[keyspace] + keyspace_meta._drop_table_metadata(table) except KeyError: - # we're trying to update a table in a keyspace we don't know about - log.error("Tried to update schema for table '%s' in unknown keyspace '%s'", - table, keyspace) - return + # can happen if keyspace disappears while processing async event + pass - if not cf_results: - # the table was removed - keyspace_meta._drop_table_metadata(table) - else: - assert len(cf_results) == 1 - table_meta = self._build_table_metadata(keyspace_meta, cf_results[0], {table: col_results}, {table: triggers_result}) - keyspace_meta._add_table_metadata(table_meta) + def _update_type(self, type_meta): + try: + self.keyspaces[type_meta.keyspace].user_types[type_meta.name] = type_meta + except KeyError: + # can happen if keyspace disappears while processing async event + pass + + def _drop_type(self, keyspace, type): + try: + self.keyspaces[keyspace].user_types.pop(type, None) + except KeyError: + # can happen if keyspace disappears while processing async event + pass + + def _update_function(self, function_meta): + try: + self.keyspaces[function_meta.keyspace].functions[function_meta.signature] = function_meta + except KeyError: + # can happen if keyspace disappears while processing async event + pass + + def _drop_function(self, keyspace, function): + try: + self.keyspaces[keyspace].functions.pop(function.signature, None) + except KeyError: + pass + + def _update_aggregate(self, aggregate_meta): + try: + self.keyspaces[aggregate_meta.keyspace].aggregates[aggregate_meta.signature] = aggregate_meta + except KeyError: + pass + + def _drop_aggregate(self, keyspace, aggregate): + try: + 
self.keyspaces[keyspace].aggregates.pop(aggregate.signature, None) + except KeyError: + pass def _keyspace_added(self, ksname): if self.token_map: @@ -258,337 +247,118 @@ def _keyspace_removed(self, ksname): if self.token_map: self.token_map.remove_keyspace(ksname) - def _build_keyspace_metadata(self, row): - name = row["keyspace_name"] - durable_writes = row["durable_writes"] - strategy_class = row["strategy_class"] - strategy_options = json.loads(row["strategy_options"]) - return KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) + def rebuild_token_map(self, partitioner, token_map): + """ + Rebuild our view of the topology from fresh rows from the + system topology tables. + For internal use only. + """ + self.partitioner = partitioner + if partitioner.endswith('RandomPartitioner'): + token_class = MD5Token + elif partitioner.endswith('Murmur3Partitioner'): + token_class = Murmur3Token + elif partitioner.endswith('ByteOrderedPartitioner'): + token_class = BytesToken + else: + self.token_map = None + return - def _build_usertype(self, keyspace, usertype_row): - type_classes = list(map(types.lookup_casstype, usertype_row['field_types'])) - return UserType(usertype_row['keyspace_name'], usertype_row['type_name'], - usertype_row['field_names'], type_classes) + token_to_host_owner = {} + ring = [] + for host, token_strings in six.iteritems(token_map): + for token_string in token_strings: + token = token_class(token_string) + ring.append(token) + token_to_host_owner[token] = host - def _build_function(self, keyspace, function_row): - return_type = types.lookup_casstype(function_row['return_type']) - return Function(function_row['keyspace_name'], function_row['function_name'], - function_row['signature'], function_row['argument_names'], - return_type, function_row['language'], function_row['body'], - function_row['called_on_null_input']) + all_tokens = sorted(ring) + self.token_map = TokenMap( + token_class, token_to_host_owner, all_tokens, self) 
- def _build_aggregate(self, keyspace, aggregate_row): - state_type = types.lookup_casstype(aggregate_row['state_type']) - initial_condition = aggregate_row['initcond'] - if initial_condition is not None: - initial_condition = state_type.deserialize(initial_condition, 3) - return_type = types.lookup_casstype(aggregate_row['return_type']) - return Aggregate(aggregate_row['keyspace_name'], aggregate_row['aggregate_name'], - aggregate_row['signature'], aggregate_row['state_func'], state_type, - aggregate_row['final_func'], initial_condition, return_type) + def get_replicas(self, keyspace, key): + """ + Returns a list of :class:`.Host` instances that are replicas for a given + partition key. + """ + t = self.token_map + if not t: + return [] + try: + return t.get_replicas(keyspace, t.token_class.from_key(key)) + except NoMurmur3: + return [] - def _build_table_metadata(self, keyspace_metadata, row, col_rows, trigger_rows): - cfname = row["columnfamily_name"] - cf_col_rows = col_rows.get(cfname, []) + def can_support_partitioner(self): + if self.partitioner.endswith('Murmur3Partitioner') and murmur3 is None: + return False + else: + return True - if not cf_col_rows: # CASSANDRA-8487 - log.warning("Building table metadata with no column meta for %s.%s", - keyspace_metadata.name, cfname) + def add_or_return_host(self, host): + """ + Returns a tuple (host, new), where ``host`` is a Host + instance, and ``new`` is a bool indicating whether + the host was newly added. 
+ """ + with self._hosts_lock: + try: + return self._hosts[host.address], False + except KeyError: + self._hosts[host.address] = host + return host, True - comparator = types.lookup_casstype(row["comparator"]) + def remove_host(self, host): + with self._hosts_lock: + return bool(self._hosts.pop(host.address, False)) - if issubclass(comparator, types.CompositeType): - column_name_types = comparator.subtypes - is_composite_comparator = True - else: - column_name_types = (comparator,) - is_composite_comparator = False + def get_host(self, address): + return self._hosts.get(address) - num_column_name_components = len(column_name_types) - last_col = column_name_types[-1] + def all_hosts(self): + """ + Returns a list of all known :class:`.Host` instances in the cluster. + """ + with self._hosts_lock: + return self._hosts.values() - column_aliases = row.get("column_aliases", None) - clustering_rows = [r for r in cf_col_rows - if r.get('type', None) == "clustering_key"] - if len(clustering_rows) > 1: - clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index')) +REPLICATION_STRATEGY_CLASS_PREFIX = "org.apache.cassandra.locator." - if column_aliases is not None: - column_aliases = json.loads(column_aliases) - else: - column_aliases = [r.get('column_name') for r in clustering_rows] - if is_composite_comparator: - if issubclass(last_col, types.ColumnToCollectionType): - # collections - is_compact = False - has_value = False - clustering_size = num_column_name_components - 2 - elif (len(column_aliases) == num_column_name_components - 1 - and issubclass(last_col, types.UTF8Type)): - # aliases? 
- is_compact = False - has_value = False - clustering_size = num_column_name_components - 1 - else: - # compact table - is_compact = True - has_value = column_aliases or not cf_col_rows - clustering_size = num_column_name_components +def trim_if_startswith(s, prefix): + if s.startswith(prefix): + return s[len(prefix):] + return s - # Some thrift tables define names in composite types (see PYTHON-192) - if not column_aliases and hasattr(comparator, 'fieldnames'): - column_aliases = comparator.fieldnames - else: - is_compact = True - if column_aliases or not cf_col_rows: - has_value = True - clustering_size = num_column_name_components - else: - has_value = False - clustering_size = 0 - table_meta = TableMetadata(keyspace_metadata, cfname) - table_meta.comparator = comparator +_replication_strategies = {} - # partition key - partition_rows = [r for r in cf_col_rows - if r.get('type', None) == "partition_key"] - if len(partition_rows) > 1: - partition_rows = sorted(partition_rows, key=lambda row: row.get('component_index')) +class ReplicationStrategyTypeType(type): + def __new__(metacls, name, bases, dct): + dct.setdefault('name', name) + cls = type.__new__(metacls, name, bases, dct) + if not name.startswith('_'): + _replication_strategies[name] = cls + return cls - key_aliases = row.get("key_aliases") - if key_aliases is not None: - key_aliases = json.loads(key_aliases) if key_aliases else [] - else: - # In 2.0+, we can use the 'type' column. In 3.0+, we have to use it. 
- key_aliases = [r.get('column_name') for r in partition_rows] - key_validator = row.get("key_validator") - if key_validator is not None: - key_type = types.lookup_casstype(key_validator) - key_types = key_type.subtypes if issubclass(key_type, types.CompositeType) else [key_type] - else: - key_types = [types.lookup_casstype(r.get('validator')) for r in partition_rows] +@six.add_metaclass(ReplicationStrategyTypeType) +class _ReplicationStrategy(object): + options_map = None - for i, col_type in enumerate(key_types): - if len(key_aliases) > i: - column_name = key_aliases[i] - elif i == 0: - column_name = "key" - else: - column_name = "key%d" % i + @classmethod + def create(cls, strategy_class, options_map): + if not strategy_class: + return None - col = ColumnMetadata(table_meta, column_name, col_type) - table_meta.columns[column_name] = col - table_meta.partition_key.append(col) + strategy_name = trim_if_startswith(strategy_class, REPLICATION_STRATEGY_CLASS_PREFIX) - # clustering key - for i in range(clustering_size): - if len(column_aliases) > i: - column_name = column_aliases[i] - else: - column_name = "column%d" % i - - col = ColumnMetadata(table_meta, column_name, column_name_types[i]) - table_meta.columns[column_name] = col - table_meta.clustering_key.append(col) - - # value alias (if present) - if has_value: - value_alias_rows = [r for r in cf_col_rows - if r.get('type', None) == "compact_value"] - - if not key_aliases: # TODO are we checking the right thing here? - value_alias = "value" - else: - value_alias = row.get("value_alias", None) - if value_alias is None and value_alias_rows: # CASSANDRA-8487 - # In 2.0+, we can use the 'type' column. In 3.0+, we have to use it. 
- value_alias = value_alias_rows[0].get('column_name') - - default_validator = row.get("default_validator") - if default_validator: - validator = types.lookup_casstype(default_validator) - else: - if value_alias_rows: # CASSANDRA-8487 - validator = types.lookup_casstype(value_alias_rows[0].get('validator')) - - col = ColumnMetadata(table_meta, value_alias, validator) - if value_alias: # CASSANDRA-8487 - table_meta.columns[value_alias] = col - - # other normal columns - for col_row in cf_col_rows: - column_meta = self._build_column_metadata(table_meta, col_row) - table_meta.columns[column_meta.name] = column_meta - - if trigger_rows: - for trigger_row in trigger_rows[cfname]: - trigger_meta = self._build_trigger_metadata(table_meta, trigger_row) - table_meta.triggers[trigger_meta.name] = trigger_meta - - table_meta.options = self._build_table_options(row) - table_meta.is_compact_storage = is_compact - - return table_meta - - def _build_table_options(self, row): - """ Setup the mostly-non-schema table options, like caching settings """ - options = dict((o, row.get(o)) for o in TableMetadata.recognized_options if o in row) - - # the option name when creating tables is "dclocal_read_repair_chance", - # but the column name in system.schema_columnfamilies is - # "local_read_repair_chance". We'll store this as dclocal_read_repair_chance, - # since that's probably what users are expecting (and we need it for the - # CREATE TABLE statement anyway). 
- if "local_read_repair_chance" in options: - val = options.pop("local_read_repair_chance") - options["dclocal_read_repair_chance"] = val - - return options - - def _build_column_metadata(self, table_metadata, row): - name = row["column_name"] - data_type = types.lookup_casstype(row["validator"]) - is_static = row.get("type", None) == "static" - column_meta = ColumnMetadata(table_metadata, name, data_type, is_static=is_static) - index_meta = self._build_index_metadata(column_meta, row) - column_meta.index = index_meta - if index_meta: - table_metadata.indexes[index_meta.name] = index_meta - return column_meta - - def _build_index_metadata(self, column_metadata, row): - index_name = row.get("index_name") - index_type = row.get("index_type") - if index_name or index_type: - options = row.get("index_options") - index_options = json.loads(options) if options else {} - return IndexMetadata(column_metadata, index_name, index_type, index_options) - else: - return None - - def _build_trigger_metadata(self, table_metadata, row): - name = row["trigger_name"] - options = row["trigger_options"] - trigger_meta = TriggerMetadata(table_metadata, name, options) - return trigger_meta - - def rebuild_token_map(self, partitioner, token_map): - """ - Rebuild our view of the topology from fresh rows from the - system topology tables. - For internal use only. 
- """ - self.partitioner = partitioner - if partitioner.endswith('RandomPartitioner'): - token_class = MD5Token - elif partitioner.endswith('Murmur3Partitioner'): - token_class = Murmur3Token - elif partitioner.endswith('ByteOrderedPartitioner'): - token_class = BytesToken - else: - self.token_map = None - return - - token_to_host_owner = {} - ring = [] - for host, token_strings in six.iteritems(token_map): - for token_string in token_strings: - token = token_class(token_string) - ring.append(token) - token_to_host_owner[token] = host - - all_tokens = sorted(ring) - self.token_map = TokenMap( - token_class, token_to_host_owner, all_tokens, self) - - def get_replicas(self, keyspace, key): - """ - Returns a list of :class:`.Host` instances that are replicas for a given - partition key. - """ - t = self.token_map - if not t: - return [] - try: - return t.get_replicas(keyspace, t.token_class.from_key(key)) - except NoMurmur3: - return [] - - def can_support_partitioner(self): - if self.partitioner.endswith('Murmur3Partitioner') and murmur3 is None: - return False - else: - return True - - def add_or_return_host(self, host): - """ - Returns a tuple (host, new), where ``host`` is a Host - instance, and ``new`` is a bool indicating whether - the host was newly added. - """ - with self._hosts_lock: - try: - return self._hosts[host.address], False - except KeyError: - self._hosts[host.address] = host - return host, True - - def remove_host(self, host): - with self._hosts_lock: - return bool(self._hosts.pop(host.address, False)) - - def get_host(self, address): - return self._hosts.get(address) - - def all_hosts(self): - """ - Returns a list of all known :class:`.Host` instances in the cluster. - """ - with self._hosts_lock: - return self._hosts.values() - - -REPLICATION_STRATEGY_CLASS_PREFIX = "org.apache.cassandra.locator." 
- - -def trim_if_startswith(s, prefix): - if s.startswith(prefix): - return s[len(prefix):] - return s - - -_replication_strategies = {} - - -class ReplicationStrategyTypeType(type): - def __new__(metacls, name, bases, dct): - dct.setdefault('name', name) - cls = type.__new__(metacls, name, bases, dct) - if not name.startswith('_'): - _replication_strategies[name] = cls - return cls - - -@six.add_metaclass(ReplicationStrategyTypeType) -class _ReplicationStrategy(object): - options_map = None - - @classmethod - def create(cls, strategy_class, options_map): - if not strategy_class: - return None - - strategy_name = trim_if_startswith(strategy_class, REPLICATION_STRATEGY_CLASS_PREFIX) - - rs_class = _replication_strategies.get(strategy_name, None) - if rs_class is None: - rs_class = _UnknownStrategyBuilder(strategy_name) - _replication_strategies[strategy_name] = rs_class + rs_class = _replication_strategies.get(strategy_name, None) + if rs_class is None: + rs_class = _UnknownStrategyBuilder(strategy_name) + _replication_strategies[strategy_name] = rs_class try: rs_instance = rs_class(options_map) @@ -1138,7 +908,15 @@ class TableMetadata(object): """ keyspace = None - """ An instance of :class:`~.KeyspaceMetadata`. """ + """ + An instance of :class:`~.KeyspaceMetadata`. + + .. deprecated:: 2.7.0 + + """ + + keyspace_name = None + """ String name of this Table's keyspace """ name = None """ The string name of the table. 
""" @@ -1236,8 +1014,8 @@ def is_cql_compatible(self): return not incompatible - def __init__(self, keyspace_metadata, name, partition_key=None, clustering_key=None, columns=None, triggers=None, options=None): - self.keyspace = keyspace_metadata + def __init__(self, keyspace_name, name, partition_key=None, clustering_key=None, columns=None, triggers=None, options=None): + self.keyspace_name = keyspace_name self.name = name self.partition_key = [] if partition_key is None else partition_key self.clustering_key = [] if clustering_key is None else clustering_key @@ -1258,7 +1036,7 @@ def export_as_string(self): else: # If we can't produce this table with CQL, comment inline ret = "/*\nWarning: Table %s.%s omitted because it has constructs not compatible with CQL (was created via legacy API).\n" % \ - (self.keyspace.name, self.name) + (self.keyspace_name, self.name) ret += "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s" % self.all_as_cql() ret += "\n*/" @@ -1283,7 +1061,7 @@ def as_cql_query(self, formatted=False): extra whitespace will be added to make the query human readable. 
""" ret = "CREATE TABLE %s.%s (%s" % ( - protect_name(self.keyspace.name), + protect_name(self.keyspace_name), protect_name(self.name), "\n" if formatted else "") @@ -1738,3 +1516,471 @@ def as_cql_query(self): protect_value(self.options['class']) ) return ret + + +class _SchemaParser(object): + + def __init__(self, connection, timeout): + self.connection = connection + self.timeout = timeout + + def _handle_results(self, success, result): + if success: + return dict_factory(*result.results) if result else [] + else: + raise result + + +class SchemaParserV12(_SchemaParser): + _SELECT_KEYSPACES = "SELECT * FROM system.schema_keyspaces" + _SELECT_COLUMN_FAMILIES = "SELECT * FROM system.schema_columnfamilies" + _SELECT_COLUMNS = "SELECT * FROM system.schema_columns" + + pass + + +class SchemaParserV20(SchemaParserV12): + _SELECT_TRIGGERS = "SELECT * FROM system.schema_triggers" + + +class SchemaParserV21(SchemaParserV20): + _SELECT_USERTYPES = "SELECT * FROM system.schema_usertypes" + + +class SchemaParserV22(SchemaParserV21): + _SELECT_FUNCTIONS = "SELECT * FROM system.schema_functions" + _SELECT_AGGREGATES = "SELECT * FROM system.schema_aggregates" + + def __init__(self, connection, timeout): + super(SchemaParserV22, self).__init__(connection, timeout) + self.keyspaces_result = [] + self.tables_result = [] + self.columns_result = [] + self.triggers_result = [] + self.types_result = [] + self.functions_result = [] + self.aggregates_result = [] + + self.keyspace_table_rows = defaultdict(list) + self.keyspace_table_col_rows = defaultdict(lambda: defaultdict(list)) + self.keyspace_type_rows = defaultdict(list) + self.keyspace_func_rows = defaultdict(list) + self.keyspace_agg_rows = defaultdict(list) + self.keyspace_table_trigger_rows = defaultdict(lambda: defaultdict(list)) + + def get_all_keyspaces(self): + self._query_all() + + for row in self.keyspaces_result: + keyspace_meta = self._build_keyspace_metadata(row) + + keyspace_col_rows = 
self.keyspace_table_col_rows.get(keyspace_meta.name, {}) + keyspace_trigger_rows = self.keyspace_table_trigger_rows.get(keyspace_meta.name, {}) + for table_row in self.keyspace_table_rows.get(keyspace_meta.name, []): + table_meta = self._build_table_metadata(keyspace_meta.name, table_row, keyspace_col_rows, keyspace_trigger_rows) + table_meta.keyspace = keyspace_meta # temporary while TableMetadata.keyspace is deprecated + keyspace_meta._add_table_metadata(table_meta) + + for usertype_row in self.keyspace_type_rows.get(keyspace_meta.name, []): + usertype = self._build_user_type(keyspace_meta.name, usertype_row) + keyspace_meta.user_types[usertype.name] = usertype + + for fn_row in self.keyspace_func_rows.get(keyspace_meta.name, []): + fn = self._build_function(keyspace_meta.name, fn_row) + keyspace_meta.functions[fn.signature] = fn + + for agg_row in self.keyspace_agg_rows.get(keyspace_meta.name, []): + agg = self._build_aggregate(keyspace_meta.name, agg_row) + keyspace_meta.aggregates[agg.signature] = agg + + yield keyspace_meta + + def get_table(self, keyspace, table): + cl = ConsistencyLevel.ONE + where_clause = " WHERE keyspace_name = '%s' AND columnfamily_name = '%s'" % (keyspace, table) + cf_query = QueryMessage(query=self._SELECT_COLUMN_FAMILIES + where_clause, consistency_level=cl) + col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl) + triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl) + (cf_success, cf_result), (col_success, col_result), (triggers_success, triggers_result) \ + = self.connection.wait_for_responses(cf_query, col_query, triggers_query, timeout=self.timeout, fail_on_error=False) + table_result = self._handle_results(cf_success, cf_result) + col_result = self._handle_results(col_success, col_result) + + # handle the triggers table not existing in Cassandra 1.2 + if not triggers_success and isinstance(triggers_result, InvalidRequest): + triggers_result = [] + 
else: + triggers_result = self._handle_results(triggers_success, triggers_result) + + if table_result: + return self._build_table_metadata(keyspace, table_result[0], {table: col_result}, {table: triggers_result}) + + # TODO: refactor common query/build code + def get_type(self, keyspace, type): + where_clause = " WHERE keyspace_name = '%s' AND type_name = '%s'" % (keyspace, type) + type_query = QueryMessage(query=self._SELECT_USERTYPES + where_clause, consistency_level=ConsistencyLevel.ONE) + type_result = self.connection.wait_for_response(type_query, self.timeout) + if type_result.results: + type_result = dict_factory(*type_result.results) + return self._build_user_type(keyspace, type_result[0]) + + def get_function(self, keyspace, function): + where_clause = " WHERE keyspace_name = '%s' AND function_name = '%s' AND signature = [%s]" \ + % (keyspace, function.name, ','.join("'%s'" % t for t in function.type_signature)) + function_query = QueryMessage(query=self._SELECT_FUNCTIONS + where_clause, consistency_level=ConsistencyLevel.ONE) + function_result = self.connection.wait_for_response(function_query, self.timeout) + if function_result.results: + function_result = dict_factory(*function_result.results) + return self._build_function(keyspace, function_result[0]) + + def get_aggregate(self, keyspace, aggregate): + # user defined aggregate within this keyspace changed + where_clause = " WHERE keyspace_name = '%s' AND aggregate_name = '%s' AND signature = [%s]" \ + % (keyspace, aggregate.name, ','.join("'%s'" % t for t in aggregate.type_signature)) + aggregate_query = QueryMessage(query=self._SELECT_AGGREGATES + where_clause, consistency_level=ConsistencyLevel.ONE) + aggregate_result = self.connection.wait_for_response(aggregate_query, self.timeout) + if aggregate_result.results: + aggregate_result = dict_factory(*aggregate_result.results) + return self._build_aggregate(keyspace, aggregate_result[0]) + + def get_keyspace(self, keyspace): + where_clause = " WHERE 
keyspace_name = '%s'" % (keyspace,) + ks_query = QueryMessage(query=self._SELECT_KEYSPACES + where_clause, consistency_level=ConsistencyLevel.ONE) + ks_result = self.connection.wait_for_response(ks_query, self.timeout) + if ks_result.results: + ks_result = dict_factory(*ks_result.results) + return self._build_keyspace_metadata(ks_result[0]) + + @staticmethod + def _build_keyspace_metadata(row): + name = row["keyspace_name"] + durable_writes = row["durable_writes"] + strategy_class = row["strategy_class"] + strategy_options = json.loads(row["strategy_options"]) + return KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) + + @staticmethod + def _build_user_type(keyspace, usertype_row): + type_classes = list(map(types.lookup_casstype, usertype_row['field_types'])) + return UserType(keyspace, usertype_row['type_name'], + usertype_row['field_names'], type_classes) + + @staticmethod + def _build_function(keyspace, function_row): + return_type = types.lookup_casstype(function_row['return_type']) + return Function(keyspace, function_row['function_name'], + function_row['signature'], function_row['argument_names'], + return_type, function_row['language'], function_row['body'], + function_row['called_on_null_input']) + + @staticmethod + def _build_aggregate(keyspace, aggregate_row): + state_type = types.lookup_casstype(aggregate_row['state_type']) + initial_condition = aggregate_row['initcond'] + if initial_condition is not None: + initial_condition = state_type.deserialize(initial_condition, 3) + return_type = types.lookup_casstype(aggregate_row['return_type']) + return Aggregate(keyspace, aggregate_row['aggregate_name'], + aggregate_row['signature'], aggregate_row['state_func'], state_type, + aggregate_row['final_func'], initial_condition, return_type) + + def _build_table_metadata(self, keyspace_name, row, col_rows, trigger_rows): + cfname = row["columnfamily_name"] + cf_col_rows = col_rows.get(cfname, []) + + if not cf_col_rows: # CASSANDRA-8487 + 
log.warning("Building table metadata with no column meta for %s.%s", + keyspace_name, cfname) + + comparator = types.lookup_casstype(row["comparator"]) + + if issubclass(comparator, types.CompositeType): + column_name_types = comparator.subtypes + is_composite_comparator = True + else: + column_name_types = (comparator,) + is_composite_comparator = False + + num_column_name_components = len(column_name_types) + last_col = column_name_types[-1] + + column_aliases = row.get("column_aliases", None) + + clustering_rows = [r for r in cf_col_rows + if r.get('type', None) == "clustering_key"] + if len(clustering_rows) > 1: + clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index')) + + if column_aliases is not None: + column_aliases = json.loads(column_aliases) + else: + column_aliases = [r.get('column_name') for r in clustering_rows] + + if is_composite_comparator: + if issubclass(last_col, types.ColumnToCollectionType): + # collections + is_compact = False + has_value = False + clustering_size = num_column_name_components - 2 + elif (len(column_aliases) == num_column_name_components - 1 + and issubclass(last_col, types.UTF8Type)): + # aliases? 
+ is_compact = False + has_value = False + clustering_size = num_column_name_components - 1 + else: + # compact table + is_compact = True + has_value = column_aliases or not cf_col_rows + clustering_size = num_column_name_components + + # Some thrift tables define names in composite types (see PYTHON-192) + if not column_aliases and hasattr(comparator, 'fieldnames'): + column_aliases = comparator.fieldnames + else: + is_compact = True + if column_aliases or not cf_col_rows: + has_value = True + clustering_size = num_column_name_components + else: + has_value = False + clustering_size = 0 + + table_meta = TableMetadata(keyspace_name, cfname) + table_meta.comparator = comparator + + # partition key + partition_rows = [r for r in cf_col_rows + if r.get('type', None) == "partition_key"] + + if len(partition_rows) > 1: + partition_rows = sorted(partition_rows, key=lambda row: row.get('component_index')) + + key_aliases = row.get("key_aliases") + if key_aliases is not None: + key_aliases = json.loads(key_aliases) if key_aliases else [] + else: + # In 2.0+, we can use the 'type' column. In 3.0+, we have to use it. 
+ key_aliases = [r.get('column_name') for r in partition_rows] + + key_validator = row.get("key_validator") + if key_validator is not None: + key_type = types.lookup_casstype(key_validator) + key_types = key_type.subtypes if issubclass(key_type, types.CompositeType) else [key_type] + else: + key_types = [types.lookup_casstype(r.get('validator')) for r in partition_rows] + + for i, col_type in enumerate(key_types): + if len(key_aliases) > i: + column_name = key_aliases[i] + elif i == 0: + column_name = "key" + else: + column_name = "key%d" % i + + col = ColumnMetadata(table_meta, column_name, col_type) + table_meta.columns[column_name] = col + table_meta.partition_key.append(col) + + # clustering key + for i in range(clustering_size): + if len(column_aliases) > i: + column_name = column_aliases[i] + else: + column_name = "column%d" % i + + col = ColumnMetadata(table_meta, column_name, column_name_types[i]) + table_meta.columns[column_name] = col + table_meta.clustering_key.append(col) + + # value alias (if present) + if has_value: + value_alias_rows = [r for r in cf_col_rows + if r.get('type', None) == "compact_value"] + + if not key_aliases: # TODO are we checking the right thing here? + value_alias = "value" + else: + value_alias = row.get("value_alias", None) + if value_alias is None and value_alias_rows: # CASSANDRA-8487 + # In 2.0+, we can use the 'type' column. In 3.0+, we have to use it. 
+ value_alias = value_alias_rows[0].get('column_name') + + default_validator = row.get("default_validator") + if default_validator: + validator = types.lookup_casstype(default_validator) + else: + if value_alias_rows: # CASSANDRA-8487 + validator = types.lookup_casstype(value_alias_rows[0].get('validator')) + + col = ColumnMetadata(table_meta, value_alias, validator) + if value_alias: # CASSANDRA-8487 + table_meta.columns[value_alias] = col + + # other normal columns + for col_row in cf_col_rows: + column_meta = self._build_column_metadata(table_meta, col_row) + table_meta.columns[column_meta.name] = column_meta + + if trigger_rows: + for trigger_row in trigger_rows[cfname]: + trigger_meta = self._build_trigger_metadata(table_meta, trigger_row) + table_meta.triggers[trigger_meta.name] = trigger_meta + + table_meta.options = self._build_table_options(row) + table_meta.is_compact_storage = is_compact + + return table_meta + + @staticmethod + def _build_table_options(row): + """ Setup the mostly-non-schema table options, like caching settings """ + options = dict((o, row.get(o)) for o in TableMetadata.recognized_options if o in row) + + # the option name when creating tables is "dclocal_read_repair_chance", + # but the column name in system.schema_columnfamilies is + # "local_read_repair_chance". We'll store this as dclocal_read_repair_chance, + # since that's probably what users are expecting (and we need it for the + # CREATE TABLE statement anyway). 
+ if "local_read_repair_chance" in options: + val = options.pop("local_read_repair_chance") + options["dclocal_read_repair_chance"] = val + + return options + + def _build_column_metadata(self, table_metadata, row): + name = row["column_name"] + data_type = types.lookup_casstype(row["validator"]) + is_static = row.get("type", None) == "static" + column_meta = ColumnMetadata(table_metadata, name, data_type, is_static=is_static) + index_meta = self._build_index_metadata(column_meta, row) + column_meta.index = index_meta + if index_meta: + table_metadata.indexes[index_meta.name] = index_meta + return column_meta + + @staticmethod + def _build_index_metadata(column_metadata, row): + index_name = row.get("index_name") + index_type = row.get("index_type") + if index_name or index_type: + options = row.get("index_options") + index_options = json.loads(options) if options else {} + return IndexMetadata(column_metadata, index_name, index_type, index_options) + else: + return None + + @staticmethod + def _build_trigger_metadata(table_metadata, row): + name = row["trigger_name"] + options = row["trigger_options"] + trigger_meta = TriggerMetadata(table_metadata, name, options) + return trigger_meta + + def _query_all(self): + cl = ConsistencyLevel.ONE + queries = [ + QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl), + QueryMessage(query=self._SELECT_COLUMN_FAMILIES, consistency_level=cl), + QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl), + QueryMessage(query=self._SELECT_USERTYPES, consistency_level=cl), + QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl), + QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl), + QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl) + ] + + responses = self.connection.wait_for_responses(*queries, timeout=self.timeout, fail_on_error=False) + (ks_success, ks_result), (table_success, table_result), \ + (col_success, col_result), (types_success, types_result), \ + 
(functions_success, functions_result), \ + (aggregates_success, aggregates_result), \ + (trigger_success, triggers_result) = responses + + self.keyspaces_result = self._handle_results(ks_success, ks_result) + self.tables_result = self._handle_results(table_success, table_result) + self.columns_result = self._handle_results(col_success, col_result) + + # if we're connected to Cassandra < 2.0, the triggers table will not exist + if trigger_success: + self.triggers_result = dict_factory(*triggers_result.results) + else: + if isinstance(triggers_result, InvalidRequest): + log.debug("triggers table not found") + elif isinstance(triggers_result, Unauthorized): + log.warning("this version of Cassandra does not allow access to schema_triggers metadata with authorization enabled (CASSANDRA-7967); " + "The driver will operate normally, but will not reflect triggers in the local metadata model, or schema strings.") + else: + raise triggers_result + + # if we're connected to Cassandra < 2.1, the usertypes table will not exist + if types_success: + self.types_result = dict_factory(*types_result.results) + else: + if isinstance(types_result, InvalidRequest): + log.debug("user types table not found") + self.types_result = {} + else: + raise types_result + + # functions were introduced in Cassandra 2.2 + if functions_success: + self.functions_result = dict_factory(*functions_result.results) + else: + if isinstance(functions_result, InvalidRequest): + log.debug("user functions table not found") + else: + raise functions_result + + # aggregates were introduced in Cassandra 2.2 + if aggregates_success: + self.aggregates_result = dict_factory(aggregates_result) + else: + if isinstance(aggregates_result, InvalidRequest): + log.debug("user aggregates table not found") + else: + raise aggregates_result + + self._aggregate_results() + + def _aggregate_results(self): + m = self.keyspace_table_rows + for row in self.tables_result: + m[row["keyspace_name"]].append(row) + + m = 
self.keyspace_table_col_rows + for row in self.columns_result: + ksname = row["keyspace_name"] + cfname = row["columnfamily_name"] + m[ksname][cfname].append(row) + + m = self.keyspace_type_rows + for row in self.types_result: + m[row["keyspace_name"]].append(row) + + m = self.keyspace_func_rows + for row in self.functions_result: + m[row["keyspace_name"]].append(row) + + m = self.keyspace_agg_rows + for row in self.aggregates_result: + m[row["keyspace_name"]].append(row) + + m = self.keyspace_table_trigger_rows + for row in self.triggers_result: + ksname = row["keyspace_name"] + cfname = row["columnfamily_name"] + m[ksname][cfname].append(row) + + +class SchemaParserV3(SchemaParserV22): + pass + + +def get_schema_parser(connection, timeout): + server_version = connection.server_version + if server_version.startswith('3'): + return SchemaParserV3(connection, timeout) + else: + # we could further specialize by version. Right now just refactoring the + # multi-version parser we have. + return SchemaParserV22(connection, timeout) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 9b544236ec..d257825d79 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -26,7 +26,7 @@ WriteFailure, ReadFailure, FunctionFailure, AlreadyExists, InvalidRequest, Unauthorized, UnsupportedOperation, UserFunctionDescriptor, - UserAggregateDescriptor) + UserAggregateDescriptor, SchemaTargetType) from cassandra.marshal import (int32_pack, int32_unpack, uint16_pack, uint16_unpack, int8_pack, int8_unpack, uint64_pack, header_pack, v3_header_pack) @@ -69,6 +69,7 @@ class InternalError(Exception): _UNSET_VALUE = object() + class _RegisterMessageType(type): def __init__(cls, name, bases, dct): if not name.startswith('_'): @@ -948,19 +949,22 @@ def recv_schema_change(cls, f, protocol_version): if protocol_version >= 3: target = read_string(f) keyspace = read_string(f) - event = {'change_type': change_type, 'keyspace': keyspace} - if target != "KEYSPACE": + event = 
{'target_type': target, 'change_type': change_type, 'keyspace': keyspace} + if target != SchemaTargetType.KEYSPACE: target_name = read_string(f) - if target == 'FUNCTION': + if target == SchemaTargetType.FUNCTION: event['function'] = UserFunctionDescriptor(target_name, [read_string(f) for _ in range(read_short(f))]) - elif target == 'AGGREGATE': + elif target == SchemaTargetType.AGGREGATE: event['aggregate'] = UserAggregateDescriptor(target_name, [read_string(f) for _ in range(read_short(f))]) else: event[target.lower()] = target_name else: keyspace = read_string(f) table = read_string(f) - event = {'change_type': change_type, 'keyspace': keyspace, 'table': table} + if table: + event = {'target_type': SchemaTargetType.TABLE, 'change_type': change_type, 'keyspace': keyspace, 'table': table} + else: + event = {'target_type': SchemaTargetType.KEYSPACE, 'change_type': change_type, 'keyspace': keyspace} return event diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 1d7f68df57..582977a95e 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -125,7 +125,7 @@ def test_basic_table_meta_properties(self): self.assertTrue(self.cfname in ksmeta.tables) tablemeta = ksmeta.tables[self.cfname] - self.assertEqual(tablemeta.keyspace, ksmeta) + self.assertEqual(tablemeta.keyspace, ksmeta) # tablemeta.keyspace is deprecated self.assertEqual(tablemeta.name, self.cfname) self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key]) diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index 9c93f5afe4..dc3e8fa1fc 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -20,7 +20,7 @@ from concurrent.futures import ThreadPoolExecutor from mock import Mock, ANY, call -from cassandra import OperationTimedOut +from cassandra import OperationTimedOut, SchemaTargetType, SchemaChangeType from 
cassandra.protocol import ResultMessage, RESULT_KIND_ROWS from cassandra.cluster import ControlConnection, _Scheduler from cassandra.pool import Host @@ -94,8 +94,8 @@ class MockConnection(object): def __init__(self): self.host = "192.168.1.0" self.local_results = [ - ["schema_version", "cluster_name", "data_center", "rack", "partitioner", "tokens"], - [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", ["0", "100", "200"]]] + ["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"], + [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]] ] self.peer_results = [ @@ -136,8 +136,8 @@ def setUp(self): def _get_matching_schema_preloaded_results(self): local_results = [ - ["schema_version", "cluster_name", "data_center", "rack", "partitioner", "tokens"], - [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", ["0", "100", "200"]]] + ["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"], + [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]] ] local_response = ResultMessage(kind=RESULT_KIND_ROWS, results=local_results) @@ -152,8 +152,8 @@ def _get_matching_schema_preloaded_results(self): def _get_nonmatching_schema_preloaded_results(self): local_results = [ - ["schema_version", "cluster_name", "data_center", "rack", "partitioner", "tokens"], - [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", ["0", "100", "200"]]] + ["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"], + [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]] ] local_response = ResultMessage(kind=RESULT_KIND_ROWS, results=local_results) @@ -402,26 +402,30 @@ def test_handle_status_change(self): def test_handle_schema_change(self): - for change_type in ('CREATED', 'DROPPED', 'UPDATED'): + change_types = [getattr(SchemaChangeType, attr) for 
attr in vars(SchemaChangeType) if attr[0] != '_'] + for change_type in change_types: event = { + 'target_type': SchemaTargetType.TABLE, 'change_type': change_type, 'keyspace': 'ks1', 'table': 'table1' } self.cluster.scheduler.reset_mock() self.control_connection._handle_schema_change(event) - self.cluster.scheduler.schedule_unique.assert_called_once_with(0.0, self.control_connection.refresh_schema, 'ks1', 'table1', None, None, None) + self.cluster.scheduler.schedule_unique.assert_called_once_with(0.0, self.control_connection.refresh_schema, **event) self.cluster.scheduler.reset_mock() - event['table'] = None + event['target_type'] = SchemaTargetType.KEYSPACE + del event['table'] self.control_connection._handle_schema_change(event) - self.cluster.scheduler.schedule_unique.assert_called_once_with(0.0, self.control_connection.refresh_schema, 'ks1', None, None, None, None) + self.cluster.scheduler.schedule_unique.assert_called_once_with(0.0, self.control_connection.refresh_schema, **event) def test_refresh_disabled(self): cluster = MockCluster() schema_event = { - 'change_type': 'CREATED', + 'target_type': SchemaTargetType.TABLE, + 'change_type': SchemaChangeType.CREATED, 'keyspace': 'ks1', 'table': 'table1' } @@ -463,4 +467,4 @@ def test_refresh_disabled(self): cc_no_topo_refresh._handle_schema_change(schema_event) cluster.scheduler.schedule_unique.assert_has_calls([call(ANY, cc_no_topo_refresh.refresh_node_list_and_token_map), call(0.0, cc_no_topo_refresh.refresh_schema, - schema_event['keyspace'], schema_event['table'], None, None, None)]) + **schema_event)]) diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index 8bdd428c65..3da3945f2d 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -26,7 +26,7 @@ NetworkTopologyStrategy, SimpleStrategy, LocalStrategy, NoMurmur3, protect_name, protect_names, protect_value, is_valid_name, - UserType, KeyspaceMetadata, Metadata, + UserType, KeyspaceMetadata, get_schema_parser, 
_UnknownStrategy) from cassandra.policies import SimpleConvictionPolicy from cassandra.pool import Host @@ -316,15 +316,17 @@ def test_build_index_as_cql(self): column_meta.name = 'column_name_here' column_meta.table.name = 'table_name_here' column_meta.table.keyspace.name = 'keyspace_name_here' - meta_model = Metadata() + connection = Mock() + connection.server_version = '2.1.0' + parser = get_schema_parser(connection, 0.1) row = {'index_name': 'index_name_here', 'index_type': 'index_type_here'} - index_meta = meta_model._build_index_metadata(column_meta, row) + index_meta = parser._build_index_metadata(column_meta, row) self.assertEqual(index_meta.as_cql_query(), 'CREATE INDEX index_name_here ON keyspace_name_here.table_name_here (column_name_here)') row['index_options'] = '{ "class_name": "class_name_here" }' row['index_type'] = 'CUSTOM' - index_meta = meta_model._build_index_metadata(column_meta, row) + index_meta = parser._build_index_metadata(column_meta, row) self.assertEqual(index_meta.as_cql_query(), "CREATE CUSTOM INDEX index_name_here ON keyspace_name_here.table_name_here (column_name_here) USING 'class_name_here'") diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 92351a9d1d..600f85bcac 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -19,7 +19,7 @@ from mock import Mock, MagicMock, ANY -from cassandra import ConsistencyLevel, Unavailable +from cassandra import ConsistencyLevel, Unavailable, SchemaTargetType, SchemaChangeType from cassandra.cluster import Session, ResponseFuture, NoHostAvailable from cassandra.connection import Connection, ConnectionException from cassandra.protocol import (ReadTimeoutErrorMessage, WriteTimeoutErrorMessage, @@ -101,11 +101,13 @@ def test_schema_change_result(self): rf = self.make_response_future(session) rf.send_request() + event_results={'target_type': SchemaTargetType.TABLE, 'change_type': SchemaChangeType.CREATED, + 'keyspace': 
"keyspace1", "table": "table1"} result = Mock(spec=ResultMessage, kind=RESULT_KIND_SCHEMA_CHANGE, - results={'keyspace': "keyspace1", "table": "table1"}) + results=event_results) rf._set_result(result) - session.submit.assert_called_once_with(ANY, 'keyspace1', 'table1', None, None, None, ANY, rf) + session.submit.assert_called_once_with(ANY, ANY, rf, **event_results) def test_other_result_message_kind(self): session = self.make_session() From c7690aaefa5e90e71576740f2a008e183fc32513 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Jul 2015 14:14:05 -0500 Subject: [PATCH 1543/3726] Don't use change_type in schema refresh. Was hoping to use change_type to avoid a query on DROPPED. However, this introduces issues when the async drop event coincides with a create or updatee on the same schema element. Always querying makes sure the refresh sees the new element, assuming schema agreement wait is enabled. --- cassandra/__init__.py | 3 +++ cassandra/metadata.py | 36 ++++++++++++++++-------------------- 2 files changed, 19 insertions(+), 20 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index ad073f7ccc..a6beab2032 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -159,6 +159,9 @@ def signature(self): def format_signature(name, type_signature): return "%s(%s)" % (name, ','.join(t for t in type_signature)) + def __repr__(self): + return "%s(%s, %s)" % (self.__class__.__name__, self.name, self.type_signature) + class UserFunctionDescriptor(SignatureDescriptor): """ diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 73c7687c6c..b79ff10a58 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -120,19 +120,15 @@ def refresh(self, connection, timeout, target_type=None, change_type=None, **kwa tt_lower = target_type.lower() try: - if change_type == SchemaChangeType.DROPPED: + parser = get_schema_parser(connection, timeout) + parse_method = getattr(parser, 'get_' + tt_lower) + meta = parse_method(**kwargs) 
+ if meta: + update_method = getattr(self, '_update_' + tt_lower) + update_method(meta) + else: drop_method = getattr(self, '_drop_' + tt_lower) drop_method(**kwargs) - else: - parser = get_schema_parser(connection, timeout) - parse_method = getattr(parser, 'get_' + tt_lower) - meta = parse_method(**kwargs) - if meta: - update_method = getattr(self, '_update_' + tt_lower) - update_method(meta) - else: - drop_method = getattr(self, '_drop_' + tt_lower) - drop_method(**kwargs) except AttributeError: raise ValueError("Unknown schema target_type: '%s'" % target_type) @@ -1620,8 +1616,8 @@ def get_type(self, keyspace, type): where_clause = " WHERE keyspace_name = '%s' AND type_name = '%s'" % (keyspace, type) type_query = QueryMessage(query=self._SELECT_USERTYPES + where_clause, consistency_level=ConsistencyLevel.ONE) type_result = self.connection.wait_for_response(type_query, self.timeout) - if type_result.results: - type_result = dict_factory(*type_result.results) + type_result = dict_factory(*type_result.results) + if type_result: return self._build_user_type(keyspace, type_result[0]) def get_function(self, keyspace, function): @@ -1629,8 +1625,8 @@ def get_function(self, keyspace, function): % (keyspace, function.name, ','.join("'%s'" % t for t in function.type_signature)) function_query = QueryMessage(query=self._SELECT_FUNCTIONS + where_clause, consistency_level=ConsistencyLevel.ONE) function_result = self.connection.wait_for_response(function_query, self.timeout) - if function_result.results: - function_result = dict_factory(*function_result.results) + function_result = dict_factory(*function_result.results) + if function_result: return self._build_function(keyspace, function_result[0]) def get_aggregate(self, keyspace, aggregate): @@ -1639,16 +1635,16 @@ def get_aggregate(self, keyspace, aggregate): % (keyspace, aggregate.name, ','.join("'%s'" % t for t in aggregate.type_signature)) aggregate_query = QueryMessage(query=self._SELECT_AGGREGATES + where_clause, 
consistency_level=ConsistencyLevel.ONE) aggregate_result = self.connection.wait_for_response(aggregate_query, self.timeout) - if aggregate_result.results: - aggregate_result = dict_factory(*aggregate_result.results) + aggregate_result = dict_factory(*aggregate_result.results) + if aggregate_result: return self._build_aggregate(keyspace, aggregate_result[0]) def get_keyspace(self, keyspace): where_clause = " WHERE keyspace_name = '%s'" % (keyspace,) ks_query = QueryMessage(query=self._SELECT_KEYSPACES + where_clause, consistency_level=ConsistencyLevel.ONE) ks_result = self.connection.wait_for_response(ks_query, self.timeout) - if ks_result.results: - ks_result = dict_factory(*ks_result.results) + ks_result = dict_factory(*ks_result.results) + if ks_result: return self._build_keyspace_metadata(ks_result[0]) @staticmethod @@ -1933,7 +1929,7 @@ def _query_all(self): # aggregates were introduced in Cassandra 2.2 if aggregates_success: - self.aggregates_result = dict_factory(aggregates_result) + self.aggregates_result = dict_factory(*aggregates_result.results) else: if isinstance(aggregates_result, InvalidRequest): log.debug("user aggregates table not found") From 483d7d366d44380c748aec01abe688e07a911f3e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Jul 2015 15:00:04 -0500 Subject: [PATCH 1544/3726] Refactor common query row/build meta functionality --- cassandra/metadata.py | 58 ++++++++++++++++++------------------------- 1 file changed, 24 insertions(+), 34 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index b79ff10a58..763b081ab1 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1526,6 +1526,13 @@ def _handle_results(self, success, result): else: raise result + def _query_build_row(self, query_string, build_func): + query = QueryMessage(query=query_string, consistency_level=ConsistencyLevel.ONE) + response = self.connection.wait_for_response(query, self.timeout) + result = dict_factory(*response.results) + if 
result: + return build_func(result[0]) + class SchemaParserV12(_SchemaParser): _SELECT_KEYSPACES = "SELECT * FROM system.schema_keyspaces" @@ -1573,20 +1580,20 @@ def get_all_keyspaces(self): keyspace_col_rows = self.keyspace_table_col_rows.get(keyspace_meta.name, {}) keyspace_trigger_rows = self.keyspace_table_trigger_rows.get(keyspace_meta.name, {}) for table_row in self.keyspace_table_rows.get(keyspace_meta.name, []): - table_meta = self._build_table_metadata(keyspace_meta.name, table_row, keyspace_col_rows, keyspace_trigger_rows) + table_meta = self._build_table_metadata(table_row, keyspace_col_rows, keyspace_trigger_rows) table_meta.keyspace = keyspace_meta # temporary while TableMetadata.keyspace is deprecated keyspace_meta._add_table_metadata(table_meta) for usertype_row in self.keyspace_type_rows.get(keyspace_meta.name, []): - usertype = self._build_user_type(keyspace_meta.name, usertype_row) + usertype = self._build_user_type(usertype_row) keyspace_meta.user_types[usertype.name] = usertype for fn_row in self.keyspace_func_rows.get(keyspace_meta.name, []): - fn = self._build_function(keyspace_meta.name, fn_row) + fn = self._build_function(fn_row) keyspace_meta.functions[fn.signature] = fn for agg_row in self.keyspace_agg_rows.get(keyspace_meta.name, []): - agg = self._build_aggregate(keyspace_meta.name, agg_row) + agg = self._build_aggregate(agg_row) keyspace_meta.aggregates[agg.signature] = agg yield keyspace_meta @@ -1609,43 +1616,25 @@ def get_table(self, keyspace, table): triggers_result = self._handle_results(triggers_success, triggers_result) if table_result: - return self._build_table_metadata(keyspace, table_result[0], {table: col_result}, {table: triggers_result}) + return self._build_table_metadata(table_result[0], {table: col_result}, {table: triggers_result}) - # TODO: refactor common query/build code def get_type(self, keyspace, type): where_clause = " WHERE keyspace_name = '%s' AND type_name = '%s'" % (keyspace, type) - type_query = 
QueryMessage(query=self._SELECT_USERTYPES + where_clause, consistency_level=ConsistencyLevel.ONE) - type_result = self.connection.wait_for_response(type_query, self.timeout) - type_result = dict_factory(*type_result.results) - if type_result: - return self._build_user_type(keyspace, type_result[0]) + return self._query_build_row(self._SELECT_USERTYPES + where_clause, self._build_user_type) def get_function(self, keyspace, function): where_clause = " WHERE keyspace_name = '%s' AND function_name = '%s' AND signature = [%s]" \ % (keyspace, function.name, ','.join("'%s'" % t for t in function.type_signature)) - function_query = QueryMessage(query=self._SELECT_FUNCTIONS + where_clause, consistency_level=ConsistencyLevel.ONE) - function_result = self.connection.wait_for_response(function_query, self.timeout) - function_result = dict_factory(*function_result.results) - if function_result: - return self._build_function(keyspace, function_result[0]) + return self._query_build_row(self._SELECT_FUNCTIONS + where_clause, self._build_function) def get_aggregate(self, keyspace, aggregate): - # user defined aggregate within this keyspace changed where_clause = " WHERE keyspace_name = '%s' AND aggregate_name = '%s' AND signature = [%s]" \ % (keyspace, aggregate.name, ','.join("'%s'" % t for t in aggregate.type_signature)) - aggregate_query = QueryMessage(query=self._SELECT_AGGREGATES + where_clause, consistency_level=ConsistencyLevel.ONE) - aggregate_result = self.connection.wait_for_response(aggregate_query, self.timeout) - aggregate_result = dict_factory(*aggregate_result.results) - if aggregate_result: - return self._build_aggregate(keyspace, aggregate_result[0]) + return self._query_build_row(self._SELECT_AGGREGATES + where_clause, self._build_aggregate) def get_keyspace(self, keyspace): where_clause = " WHERE keyspace_name = '%s'" % (keyspace,) - ks_query = QueryMessage(query=self._SELECT_KEYSPACES + where_clause, consistency_level=ConsistencyLevel.ONE) - ks_result = 
self.connection.wait_for_response(ks_query, self.timeout) - ks_result = dict_factory(*ks_result.results) - if ks_result: - return self._build_keyspace_metadata(ks_result[0]) + return self._query_build_row(self._SELECT_KEYSPACES + where_clause, self._build_keyspace_metadata) @staticmethod def _build_keyspace_metadata(row): @@ -1656,31 +1645,32 @@ def _build_keyspace_metadata(row): return KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) @staticmethod - def _build_user_type(keyspace, usertype_row): + def _build_user_type(usertype_row): type_classes = list(map(types.lookup_casstype, usertype_row['field_types'])) - return UserType(keyspace, usertype_row['type_name'], + return UserType(usertype_row['keyspace_name'], usertype_row['type_name'], usertype_row['field_names'], type_classes) @staticmethod - def _build_function(keyspace, function_row): + def _build_function(function_row): return_type = types.lookup_casstype(function_row['return_type']) - return Function(keyspace, function_row['function_name'], + return Function(function_row['keyspace_name'], function_row['function_name'], function_row['signature'], function_row['argument_names'], return_type, function_row['language'], function_row['body'], function_row['called_on_null_input']) @staticmethod - def _build_aggregate(keyspace, aggregate_row): + def _build_aggregate(aggregate_row): state_type = types.lookup_casstype(aggregate_row['state_type']) initial_condition = aggregate_row['initcond'] if initial_condition is not None: initial_condition = state_type.deserialize(initial_condition, 3) return_type = types.lookup_casstype(aggregate_row['return_type']) - return Aggregate(keyspace, aggregate_row['aggregate_name'], + return Aggregate(aggregate_row['keyspace_name'], aggregate_row['aggregate_name'], aggregate_row['signature'], aggregate_row['state_func'], state_type, aggregate_row['final_func'], initial_condition, return_type) - def _build_table_metadata(self, keyspace_name, row, col_rows, 
trigger_rows): + def _build_table_metadata(self, row, col_rows, trigger_rows): + keyspace_name = row["keyspace_name"] cfname = row["columnfamily_name"] cf_col_rows = col_rows.get(cfname, []) From 179d0b6503028e54af6b4b5da509cce2e04fe21a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Jul 2015 16:44:41 -0500 Subject: [PATCH 1545/3726] integration test setup: use cluster meta instead of selecting directly. Required for version-dependent schema parsing. --- tests/integration/__init__.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 3568b1ffc6..33d3ae93c0 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -280,10 +280,8 @@ def setup_keyspace(ipformat=None): session = cluster.connect() try: - results = execute_until_pass(session, "SELECT keyspace_name FROM system.schema_keyspaces") - existing_keyspaces = [row[0] for row in results] for ksname in ('test1rf', 'test2rf', 'test3rf'): - if ksname in existing_keyspaces: + if ksname in cluster.metadata.keyspaces: execute_until_pass(session, "DROP KEYSPACE %s" % ksname) ddl = ''' From 5c585e2304e9d987528742e88b999176623be1cd Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 1 Jul 2015 16:45:46 -0500 Subject: [PATCH 1546/3726] Initial SchemaParser for C* 3 - select from new 'system_schema' tables - use new keyspaces 'replication' config map --- cassandra/metadata.py | 49 ++++++++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 21 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 763b081ab1..6b177d7049 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1534,26 +1534,17 @@ def _query_build_row(self, query_string, build_func): return build_func(result[0]) -class SchemaParserV12(_SchemaParser): +class SchemaParserV22(_SchemaParser): _SELECT_KEYSPACES = "SELECT * FROM system.schema_keyspaces" _SELECT_COLUMN_FAMILIES = "SELECT * FROM 
system.schema_columnfamilies" _SELECT_COLUMNS = "SELECT * FROM system.schema_columns" - - pass - - -class SchemaParserV20(SchemaParserV12): _SELECT_TRIGGERS = "SELECT * FROM system.schema_triggers" - - -class SchemaParserV21(SchemaParserV20): - _SELECT_USERTYPES = "SELECT * FROM system.schema_usertypes" - - -class SchemaParserV22(SchemaParserV21): + _SELECT_TYPES = "SELECT * FROM system.schema_usertypes" _SELECT_FUNCTIONS = "SELECT * FROM system.schema_functions" _SELECT_AGGREGATES = "SELECT * FROM system.schema_aggregates" + _table_name_col = 'columnfamily_name' + def __init__(self, connection, timeout): super(SchemaParserV22, self).__init__(connection, timeout) self.keyspaces_result = [] @@ -1600,7 +1591,7 @@ def get_all_keyspaces(self): def get_table(self, keyspace, table): cl = ConsistencyLevel.ONE - where_clause = " WHERE keyspace_name = '%s' AND columnfamily_name = '%s'" % (keyspace, table) + where_clause = " WHERE keyspace_name = '%s' AND %s = '%s'" % (keyspace, self._table_name_col, table) cf_query = QueryMessage(query=self._SELECT_COLUMN_FAMILIES + where_clause, consistency_level=cl) col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl) triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl) @@ -1620,7 +1611,7 @@ def get_table(self, keyspace, table): def get_type(self, keyspace, type): where_clause = " WHERE keyspace_name = '%s' AND type_name = '%s'" % (keyspace, type) - return self._query_build_row(self._SELECT_USERTYPES + where_clause, self._build_user_type) + return self._query_build_row(self._SELECT_TYPES + where_clause, self._build_user_type) def get_function(self, keyspace, function): where_clause = " WHERE keyspace_name = '%s' AND function_name = '%s' AND signature = [%s]" \ @@ -1671,7 +1662,7 @@ def _build_aggregate(aggregate_row): def _build_table_metadata(self, row, col_rows, trigger_rows): keyspace_name = row["keyspace_name"] - cfname = row["columnfamily_name"] + cfname 
= row[self._table_name_col] cf_col_rows = col_rows.get(cfname, []) if not cf_col_rows: # CASSANDRA-8487 @@ -1869,7 +1860,7 @@ def _query_all(self): QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl), QueryMessage(query=self._SELECT_COLUMN_FAMILIES, consistency_level=cl), QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl), - QueryMessage(query=self._SELECT_USERTYPES, consistency_level=cl), + QueryMessage(query=self._SELECT_TYPES, consistency_level=cl), QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl), QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl), QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl) @@ -1936,7 +1927,7 @@ def _aggregate_results(self): m = self.keyspace_table_col_rows for row in self.columns_result: ksname = row["keyspace_name"] - cfname = row["columnfamily_name"] + cfname = row[self._table_name_col] m[ksname][cfname].append(row) m = self.keyspace_type_rows @@ -1954,12 +1945,28 @@ def _aggregate_results(self): m = self.keyspace_table_trigger_rows for row in self.triggers_result: ksname = row["keyspace_name"] - cfname = row["columnfamily_name"] + cfname = row[self._table_name_col] m[ksname][cfname].append(row) class SchemaParserV3(SchemaParserV22): - pass + _SELECT_KEYSPACES = "SELECT * FROM system_schema.keyspaces" + _SELECT_COLUMN_FAMILIES = "SELECT * FROM system_schema.tables" + _SELECT_COLUMNS = "SELECT * FROM system_schema.columns" + _SELECT_TRIGGERS = "SELECT * FROM system_schema.triggers" + _SELECT_TYPES = "SELECT * FROM system_schema.types" + _SELECT_FUNCTIONS = "SELECT * FROM system_schema.functions" + _SELECT_AGGREGATES = "SELECT * FROM system_schema.aggregates" + + _table_name_col = 'table_name' + + @staticmethod + def _build_keyspace_metadata(row): + name = row["keyspace_name"] + durable_writes = row["durable_writes"] + strategy_options = dict(row["replication"]) + strategy_class = strategy_options.pop("class") + return KeyspaceMetadata(name, durable_writes, 
strategy_class, strategy_options) def get_schema_parser(connection, timeout): @@ -1968,5 +1975,5 @@ def get_schema_parser(connection, timeout): return SchemaParserV3(connection, timeout) else: # we could further specialize by version. Right now just refactoring the - # multi-version parser we have. + # multi-version parser we have as of C* 2.2.0rc1. return SchemaParserV22(connection, timeout) From 5eb47f3dd2957ca50649d70fac85f703146dae29 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 7 Jul 2015 13:52:19 -0500 Subject: [PATCH 1547/3726] format strings for Python 2.6 in test_udt integration test --- tests/integration/standard/test_udts.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index eeef0508ba..495b67575d 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -295,8 +295,8 @@ def test_can_insert_udts_with_varying_lengths(self): MAX_TEST_LENGTH = 1024 # create the seed udt, increase timeout to avoid the query failure on slow systems - s.execute("CREATE TYPE lengthy_udt ({})" - .format(', '.join(['v_{} int'.format(i) + s.execute("CREATE TYPE lengthy_udt ({0})" + .format(', '.join(['v_{0} int'.format(i) for i in range(MAX_TEST_LENGTH)]))) # create a table with multiple sizes of nested udts @@ -306,7 +306,7 @@ def test_can_insert_udts_with_varying_lengths(self): "v frozen)") # create and register the seed udt type - udt = namedtuple('lengthy_udt', tuple(['v_{}'.format(i) for i in range(MAX_TEST_LENGTH)])) + udt = namedtuple('lengthy_udt', tuple(['v_{0}'.format(i) for i in range(MAX_TEST_LENGTH)])) c.register_user_type("udttests", "lengthy_udt", udt) # verify inserts and reads @@ -330,7 +330,7 @@ def nested_udt_schema_helper(self, session, MAX_NESTING_DEPTH): # create the nested udts for i in range(MAX_NESTING_DEPTH): - execute_until_pass(session, "CREATE TYPE depth_{} (value 
frozen)".format(i + 1, i)) + execute_until_pass(session, "CREATE TYPE depth_{0} (value frozen)".format(i + 1, i)) # create a table with multiple sizes of nested udts # no need for all nested types, only a spot checked few and the largest one @@ -390,9 +390,9 @@ def test_can_insert_nested_registered_udts(self): # create and register the nested udt types for i in range(MAX_NESTING_DEPTH): - udt = namedtuple('depth_{}'.format(i + 1), ('value')) + udt = namedtuple('depth_{0}'.format(i + 1), ('value')) udts.append(udt) - c.register_user_type("udttests", "depth_{}".format(i + 1), udts[i + 1]) + c.register_user_type("udttests", "depth_{0}".format(i + 1), udts[i + 1]) # insert udts and verify inserts with reads self.nested_udt_verification_helper(s, MAX_NESTING_DEPTH, udts) @@ -420,7 +420,7 @@ def test_can_insert_nested_unregistered_udts(self): # create the nested udt types for i in range(MAX_NESTING_DEPTH): - udt = namedtuple('depth_{}'.format(i + 1), ('value')) + udt = namedtuple('depth_{0}'.format(i + 1), ('value')) udts.append(udt) # insert udts via prepared statements and verify inserts with reads @@ -461,9 +461,9 @@ def test_can_insert_nested_registered_udts_with_different_namedtuples(self): # create and register the nested udt types for i in range(MAX_NESTING_DEPTH): - udt = namedtuple('level_{}'.format(i + 1), ('value')) + udt = namedtuple('level_{0}'.format(i + 1), ('value')) udts.append(udt) - c.register_user_type("udttests", "depth_{}".format(i + 1), udts[i + 1]) + c.register_user_type("udttests", "depth_{0}".format(i + 1), udts[i + 1]) # insert udts and verify inserts with reads self.nested_udt_verification_helper(s, MAX_NESTING_DEPTH, udts) @@ -514,7 +514,7 @@ def test_can_insert_udt_all_datatypes(self): # register UDT alphabet_list = [] for i in range(ord('a'), ord('a') + len(PRIMITIVE_DATATYPES)): - alphabet_list.append('{}'.format(chr(i))) + alphabet_list.append('{0}'.format(chr(i))) Alldatatypes = namedtuple("alldatatypes", alphabet_list) 
c.register_user_type("udttests", "alldatatypes", Alldatatypes) @@ -651,4 +651,4 @@ def test_can_insert_nested_collections(self): key2 = nested_collection_udt_nested({3: 'v3'}, value.t, value.l, value.s, value) validate('map_udt', OrderedMap([(key, value), (key2, value)])) - c.shutdown() \ No newline at end of file + c.shutdown() From a15dac726feb4e6c9b5ba2f1f387a9bcb46c267b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 7 Jul 2015 15:08:43 -0500 Subject: [PATCH 1548/3726] Make integration test_udts work in Python3 --- tests/integration/standard/test_udts.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 495b67575d..72aa658165 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -19,6 +19,7 @@ from collections import namedtuple from functools import partial +import six from cassandra import InvalidRequest from cassandra.cluster import Cluster, UserTypeDoesNotExist @@ -277,10 +278,9 @@ def test_can_insert_udts_with_nulls(self): self.assertEqual((None, None, None, None), s.execute(select)[0].b) # also test empty strings - s.execute(insert, [User('', None, None, '')]) + s.execute(insert, [User('', None, None, six.binary_type())]) results = s.execute("SELECT b FROM mytable WHERE a=0") - self.assertEqual(('', None, None, ''), results[0].b) - self.assertEqual(('', None, None, ''), s.execute(select)[0].b) + self.assertEqual(('', None, None, six.binary_type()), results[0].b) c.shutdown() @@ -292,7 +292,7 @@ def test_can_insert_udts_with_varying_lengths(self): c = Cluster(protocol_version=PROTOCOL_VERSION) s = c.connect("udttests") - MAX_TEST_LENGTH = 1024 + MAX_TEST_LENGTH = 254 # create the seed udt, increase timeout to avoid the query failure on slow systems s.execute("CREATE TYPE lengthy_udt ({0})" From 3cb1b603af7c538132315ca12bd82f2b9ea67a7a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 7 
Jul 2015 15:36:55 -0500 Subject: [PATCH 1549/3726] Make integration test_custom_payload work in Python3 --- .../integration/standard/test_custom_payload.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/tests/integration/standard/test_custom_payload.py b/tests/integration/standard/test_custom_payload.py index 87b38fac40..04b0b1f739 100644 --- a/tests/integration/standard/test_custom_payload.py +++ b/tests/integration/standard/test_custom_payload.py @@ -18,6 +18,8 @@ except ImportError: import unittest +import six + from cassandra.query import (SimpleStatement, BatchStatement, BatchType) from cassandra.cluster import Cluster @@ -123,29 +125,29 @@ def validate_various_custom_payloads(self, statement): """ # Simple key value - custom_payload = {'test': 'test_return'} + custom_payload = {'test': b'test_return'} self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload) # no key value - custom_payload = {'': ''} + custom_payload = {'': b''} self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload) # Space value - custom_payload = {' ': ' '} + custom_payload = {' ': b' '} self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload) # Long key value pair - key_value = "x" * 10000 - custom_payload = {key_value: key_value} + key_value = "x" * 10 + custom_payload = {key_value: six.b(key_value)} self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload) # Max supported value key pairs according C* binary protocol v4 should be 65534 (unsigned short max value) for i in range(65534): - custom_payload[str(i)] = str(i) + custom_payload[str(i)] = six.b('x') self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload) # Add one custom payload to this is too many key value pairs and should fail - custom_payload[str(65535)] = str(65535) + custom_payload[str(65535)] = 
six.b('x') with self.assertRaises(ValueError): self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload) From 858a26879c8f8ae2b3d7dba44fe4d5e5bbbd8501 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 7 Jul 2015 16:59:31 -0500 Subject: [PATCH 1550/3726] Mkae integration test_client_warnings work in Python3 --- tests/integration/standard/test_client_warnings.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index 173cc69756..14405a8df1 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -47,7 +47,6 @@ def setUpClass(cls): for x in range(213): cls.warn_batch.add(cls.prepared, (x, x, 1)) - @classmethod def tearDownClass(cls): if PROTOCOL_VERSION < 4: @@ -105,7 +104,7 @@ def test_warning_with_custom_payload(self): - batch_size_warn_threshold_in_kb: 5 @test_category queries:client_warning """ - payload = {'key': 'value'} + payload = {'key': b'value'} future = self.session.execute_async(self.warn_batch, custom_payload=payload) future.result() self.assertEqual(len(future.warnings), 1) @@ -123,7 +122,7 @@ def test_warning_with_trace_and_custom_payload(self): - batch_size_warn_threshold_in_kb: 5 @test_category queries:client_warning """ - payload = {'key': 'value'} + payload = {'key': b'value'} future = self.session.execute_async(self.warn_batch, trace=True, custom_payload=payload) future.result() self.assertEqual(len(future.warnings), 1) From 9de29a76650c4198a86fe09a913d6594ff1f03af Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 8 Jul 2015 09:46:00 -0500 Subject: [PATCH 1551/3726] Add optional cythonized core driver files. 
--- .gitignore | 3 +++ cassandra/marshal.py | 6 ++++-- setup.py | 28 ++++++++++++++++++++-------- 3 files changed, 27 insertions(+), 10 deletions(-) diff --git a/.gitignore b/.gitignore index 664c6b45ef..ee93232c33 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,9 @@ tests/integration/ccm setuptools*.tar.gz setuptools*.egg +cassandra/*.c +!cassandra/murmur3.c + # OSX .DS_Store diff --git a/cassandra/marshal.py b/cassandra/marshal.py index 6451ab00ae..fba5b78944 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -48,13 +48,15 @@ def _make_packer(format_string): def varint_unpack(term): val = int(''.join("%02x" % i for i in term), 16) if (term[0] & 128) != 0: - val -= 1 << (len(term) * 8) + len_term = len(term) # pulling this out of the expression to avoid overflow in cython optimized code + val -= 1 << (len_term * 8) return val else: def varint_unpack(term): # noqa val = int(term.encode('hex'), 16) if (ord(term[0]) & 128) != 0: - val = val - (1 << (len(term) * 8)) + len_term = len(term) # pulling this out of the expression to avoid overflow in cython optimized code + val = val - (1 << (len_term * 8)) return val diff --git a/setup.py b/setup.py index e85d332d37..b375458153 100644 --- a/setup.py +++ b/setup.py @@ -234,16 +234,28 @@ def run_setup(extensions): ], **kw) -extensions = [murmur3_ext, libev_ext] +extensions = [] + +if "--no-murmur3" not in sys.argv: + extensions.append(murmur3_ext) + +if "--no-libev" not in sys.argv: + extensions.append(libev_ext) + +if "--no-cython" not in sys.argv: + try: + from Cython.Build import cythonize + cython_candidates = ['cluster', 'concurrent', 'connection', 'cqltypes', 'marshal', 'metadata', 'pool', 'protocol', 'query', 'util'] + extensions.extend(cythonize( + [Extension('cassandra.%s' % m, ['cassandra/%s.py' % m], extra_compile_args=['-Wno-unused-function']) for m in cython_candidates], + exclude_failures=True)) + except ImportError: + warnings.warn("Cython is not installed. 
Not compiling core driver files as extensions (optional).") + if "--no-extensions" in sys.argv: - sys.argv = [a for a in sys.argv if a != "--no-extensions"] extensions = [] -elif "--no-murmur3" in sys.argv: - sys.argv = [a for a in sys.argv if a != "--no-murmur3"] - extensions.remove(murmur3_ext) -elif "--no-libev" in sys.argv: - sys.argv = [a for a in sys.argv if a != "--no-libev"] - extensions.remove(libev_ext) + +sys.argv = [a for a in sys.argv if a not in ("--no-murmur3", "--no-libev", "--no-cython", "--no-extensions")] is_windows = os.name == 'nt' if is_windows: From d14c627a7683f56fdd305e02b737c9df3b9827ef Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 8 Jul 2015 11:56:59 -0500 Subject: [PATCH 1552/3726] Reorganize conditional extensions for windows also, don't pass gcc-specific compile options when building cython modules for windows --- setup.py | 41 +++++++++++++++++++---------------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/setup.py b/setup.py index b375458153..b5a08ab21c 100644 --- a/setup.py +++ b/setup.py @@ -234,20 +234,37 @@ def run_setup(extensions): ], **kw) +is_windows = os.name == 'nt' +if is_windows: + build_extensions.error_message = """ +=============================================================================== +WARNING: could not compile %s. + +The C extensions are not required for the driver to run, but they add support +for token-aware routing with the Murmur3Partitioner. + +On Windows, make sure Visual Studio or an SDK is installed, and your environment +is configured to build for the appropriate architecture (matching your Python runtime). +This is often a matter of using vcvarsall.bat from your install directory, or running +from a command prompt in the Visual Studio Tools Start Menu. 
+=============================================================================== +""" + extensions = [] if "--no-murmur3" not in sys.argv: extensions.append(murmur3_ext) -if "--no-libev" not in sys.argv: +if "--no-libev" not in sys.argv and not is_windows: extensions.append(libev_ext) if "--no-cython" not in sys.argv: try: from Cython.Build import cythonize cython_candidates = ['cluster', 'concurrent', 'connection', 'cqltypes', 'marshal', 'metadata', 'pool', 'protocol', 'query', 'util'] + compile_args = [] if is_windows else ['-Wno-unused-function'] extensions.extend(cythonize( - [Extension('cassandra.%s' % m, ['cassandra/%s.py' % m], extra_compile_args=['-Wno-unused-function']) for m in cython_candidates], + [Extension('cassandra.%s' % m, ['cassandra/%s.py' % m], extra_compile_args=compile_args) for m in cython_candidates], exclude_failures=True)) except ImportError: warnings.warn("Cython is not installed. Not compiling core driver files as extensions (optional).") @@ -257,26 +274,6 @@ def run_setup(extensions): sys.argv = [a for a in sys.argv if a not in ("--no-murmur3", "--no-libev", "--no-cython", "--no-extensions")] -is_windows = os.name == 'nt' -if is_windows: - # libev is difficult to build, and uses select in Windows. - try: - extensions.remove(libev_ext) - except ValueError: - pass - build_extensions.error_message = """ -=============================================================================== -WARNING: could not compile %s. - -The C extensions are not required for the driver to run, but they add support -for token-aware routing with the Murmur3Partitioner. - -On Windows, make sure Visual Studio or an SDK is installed, and your environment -is configured to build for the appropriate architecture (matching your Python runtime). -This is often a matter of using vcvarsall.bat from your install directory, or running -from a command prompt in the Visual Studio Tools Start Menu. 
-=============================================================================== -""" platform_unsupported_msg = \ """ From 0ef3f6701d577e0f4d6a7e6753143bf485d674b6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Jul 2015 11:49:14 -0500 Subject: [PATCH 1553/3726] cqle: don't depend on position of keyword argument in mock call assertions Makes tests more robust, and able to run when core is cythonized Also removed autospec; autospec introspection introduced in Python 3.4 did not play well with cython method. --- .../cqlengine/query/test_batch_query.py | 8 +++---- .../cqlengine/query/test_queryset.py | 24 +++++++++---------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/tests/integration/cqlengine/query/test_batch_query.py b/tests/integration/cqlengine/query/test_batch_query.py index d9dc33c22d..4a3e21318c 100644 --- a/tests/integration/cqlengine/query/test_batch_query.py +++ b/tests/integration/cqlengine/query/test_batch_query.py @@ -187,13 +187,13 @@ def test_batch_execute_on_exception_skips_if_not_specified(self): self.assertEqual(0, len(obj)) def test_batch_execute_timeout(self): - with mock.patch.object(Session, 'execute', autospec=True) as mock_execute: + with mock.patch.object(Session, 'execute') as mock_execute: with BatchQuery(timeout=1) as b: BatchQueryLogModel.batch(b).create(k=2, v=2) - mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=1) + self.assertEqual(mock_execute.call_args[-1]['timeout'], 1) def test_batch_execute_no_timeout(self): - with mock.patch.object(Session, 'execute', autospec=True) as mock_execute: + with mock.patch.object(Session, 'execute') as mock_execute: with BatchQuery() as b: BatchQueryLogModel.batch(b).create(k=2, v=2) - mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=NOT_SET) + self.assertEqual(mock_execute.call_args[-1]['timeout'], NOT_SET) diff --git a/tests/integration/cqlengine/query/test_queryset.py 
b/tests/integration/cqlengine/query/test_queryset.py index 5c2b76f985..7bb101b92b 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -712,19 +712,19 @@ class PagingTest(Model): class ModelQuerySetTimeoutTestCase(BaseQuerySetUsage): def test_default_timeout(self): - with mock.patch.object(Session, 'execute', autospec=True) as mock_execute: + with mock.patch.object(Session, 'execute') as mock_execute: list(TestModel.objects()) - mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=NOT_SET) + self.assertEqual(mock_execute.call_args[-1]['timeout'], NOT_SET) def test_float_timeout(self): - with mock.patch.object(Session, 'execute', autospec=True) as mock_execute: + with mock.patch.object(Session, 'execute') as mock_execute: list(TestModel.objects().timeout(0.5)) - mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=0.5) + self.assertEqual(mock_execute.call_args[-1]['timeout'], 0.5) def test_none_timeout(self): - with mock.patch.object(Session, 'execute', autospec=True) as mock_execute: + with mock.patch.object(Session, 'execute') as mock_execute: list(TestModel.objects().timeout(None)) - mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=None) + self.assertEqual(mock_execute.call_args[-1]['timeout'], None) class DMLQueryTimeoutTestCase(BaseQuerySetUsage): @@ -733,19 +733,19 @@ def setUp(self): super(DMLQueryTimeoutTestCase, self).setUp() def test_default_timeout(self): - with mock.patch.object(Session, 'execute', autospec=True) as mock_execute: + with mock.patch.object(Session, 'execute') as mock_execute: self.model.save() - mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=NOT_SET) + self.assertEqual(mock_execute.call_args[-1]['timeout'], NOT_SET) def test_float_timeout(self): - with mock.patch.object(Session, 'execute', autospec=True) as mock_execute: + with mock.patch.object(Session, 'execute') as 
mock_execute: self.model.timeout(0.5).save() - mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=0.5) + self.assertEqual(mock_execute.call_args[-1]['timeout'], 0.5) def test_none_timeout(self): - with mock.patch.object(Session, 'execute', autospec=True) as mock_execute: + with mock.patch.object(Session, 'execute') as mock_execute: self.model.timeout(None).save() - mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=None) + self.assertEqual(mock_execute.call_args[-1]['timeout'], None) def test_timeout_then_batch(self): b = query.BatchQuery() From 71342f6c9db6e1592620c438bf0cdc27eefc4a22 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Jul 2015 11:51:40 -0500 Subject: [PATCH 1554/3726] Make tox unit tests run from alternate directory Ensures that build, installed extensions are used, instead of relying on what is inplace for the package under test. This allows us to test differing combinations of optional extensions (cython) without any assumptions about the local package state. 
Also: - consolidate py26 section and place unittest2 as conditional dep - introduce conditional cython dep, based on env var --- tox.ini | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/tox.ini b/tox.ini index 23b2aee1f4..433e254004 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py26,py27,pypy,py33,py34 +envlist = py{26,27,33,34},pypy [base] deps = nose @@ -11,19 +11,15 @@ deps = nose deps = {[base]deps} sure==1.2.3 blist + {env:CYTHON_DEP:} + py26: unittest2 setenv = USE_CASS_EXTERNAL=1 -commands = {envpython} setup.py build_ext --inplace - nosetests --verbosity=2 tests/unit/ - nosetests --verbosity=2 tests/integration/cqlengine - -[testenv:py26] -deps = {[testenv]deps} - unittest2 -# test skipping is different in unittest2 for python 2.7+; let's just use it where needed +changedir = {envtmpdir} +commands = nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/ + nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/integration/cqlengine [testenv:pypy] deps = {[base]deps} -commands = {envpython} setup.py build_ext --inplace - nosetests --verbosity=2 tests/unit/ +commands = nosetests --verbosity=2 {toxinidir}/tests/unit/ # cqlengine/test_timestamp.py uses sure, which fails in pypy presently # could remove sure usage From aa328356605cfefe54903f8b1396b01befa85937 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Jul 2015 11:54:36 -0500 Subject: [PATCH 1555/3726] Add travis env configs to test optional cython extensions --- .travis.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.travis.yml b/.travis.yml index 83ffff8fe1..3aa6228b92 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,12 +2,16 @@ language: python python: 2.7 env: - TOX_ENV=py26 CASS_VER=21 + - TOX_ENV=py26 CASS_VER=21 CYTHON_DEP=cython - TOX_ENV=py27 CASS_VER=12 - TOX_ENV=py27 CASS_VER=20 - TOX_ENV=py27 CASS_VER=21 + - TOX_ENV=py27 CASS_VER=21 CYTHON_DEP=cython - TOX_ENV=pypy CASS_VER=21 - TOX_ENV=py33 
CASS_VER=21 + - TOX_ENV=py33 CASS_VER=21 CYTHON_DEP=cython - TOX_ENV=py34 CASS_VER=21 + - TOX_ENV=py34 CASS_VER=21 CYTHON_DEP=cython before_install: - sudo apt-get update -y From 376cb0c10c2a8a77eb4d7df944c18e314012aeef Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Jul 2015 12:44:49 -0500 Subject: [PATCH 1556/3726] print instead of warn when not building cython extensions --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b5a08ab21c..54527d1a49 100644 --- a/setup.py +++ b/setup.py @@ -267,7 +267,7 @@ def run_setup(extensions): [Extension('cassandra.%s' % m, ['cassandra/%s.py' % m], extra_compile_args=compile_args) for m in cython_candidates], exclude_failures=True)) except ImportError: - warnings.warn("Cython is not installed. Not compiling core driver files as extensions (optional).") + print("Cython is not installed. Not compiling core driver files as extensions (optional).") if "--no-extensions" in sys.argv: extensions = [] From 5030c35d5312c55b238e191112ef586d53498329 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Jul 2015 14:12:48 -0500 Subject: [PATCH 1557/3726] release procedure: add step to update cassandra-test branch --- README-dev.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README-dev.rst b/README-dev.rst index 3d1f9b19f8..697b83f16a 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -23,6 +23,11 @@ Releasing * After a beta or rc release, this should look like ``(2, 1, '0b1', 'post')`` * Commit and push +* Update 'cassandra-test' branch to reflect new release + + * this is typically a matter of merging or rebasing onto master + * test and push updated branch to origin + * Update the JIRA versions: https://datastax-oss.atlassian.net/plugins/servlet/project-config/PYTHON/versions * Make an announcement on the mailing list From e9719ae24ef4161ada02d8f8fbf41a3ed1df5cfc Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Jul 2015 16:07:39 -0500 Subject: [PATCH 
1558/3726] Swallow KeyErr for meta DROP updates with missing keyspace Makes control connection robust against out-of-order async events when it doesn't matter (if KS is already gone, sub-component is dropped). --- cassandra/metadata.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 8d550cee41..6e98d51336 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -210,31 +210,37 @@ def usertype_changed(self, keyspace, name, type_results): new_usertype = self._build_usertype(keyspace, type_results[0]) self.keyspaces[keyspace].user_types[name] = new_usertype else: - # the type was deleted - self.keyspaces[keyspace].user_types.pop(name, None) + try: + self.keyspaces[keyspace].user_types.pop(name, None) + except KeyError: + pass def function_changed(self, keyspace, function, function_results): if function_results: new_function = self._build_function(keyspace, function_results[0]) self.keyspaces[keyspace].functions[function.signature] = new_function else: - # the function was deleted - self.keyspaces[keyspace].functions.pop(function.signature, None) + try: + self.keyspaces[keyspace].functions.pop(function.signature, None) + except KeyError: + pass def aggregate_changed(self, keyspace, aggregate, aggregate_results): if aggregate_results: new_aggregate = self._build_aggregate(keyspace, aggregate_results[0]) self.keyspaces[keyspace].aggregates[aggregate.signature] = new_aggregate else: - # the aggregate was deleted - self.keyspaces[keyspace].aggregates.pop(aggregate.signature, None) + try: + self.keyspaces[keyspace].aggregates.pop(aggregate.signature, None) + except KeyError: + pass def table_changed(self, keyspace, table, cf_results, col_results, triggers_result): try: keyspace_meta = self.keyspaces[keyspace] except KeyError: # we're trying to update a table in a keyspace we don't know about - log.error("Tried to update schema for table '%s' in unknown keyspace '%s'", + 
log.warn("Tried to update schema for table '%s' in unknown keyspace '%s'", table, keyspace) return From 5777c204698543e1c18870173a2b3417f668324e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 9 Jul 2015 16:53:51 -0500 Subject: [PATCH 1559/3726] Attempt to process correlated events the order in which the arrive Events are mostly processed in order. "mostly" because there is still a race by way of the cluster thread pool executor. Meta class should be robust against this. --- cassandra/cluster.py | 22 +++++++++++++++++++--- cassandra/metadata.py | 19 +++++++------------ tests/unit/test_control_connection.py | 4 ++-- 3 files changed, 28 insertions(+), 17 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 45784c4623..fb697a182a 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2065,6 +2065,8 @@ def __init__(self, cluster, timeout, self._reconnection_handler = None self._reconnection_lock = RLock() + self._event_schedule_times = {} + def connect(self): if self._is_shutdown: return @@ -2501,12 +2503,26 @@ def _update_location_info(self, host, datacenter, rack): self._cluster.load_balancing_policy.on_up(host) return True + def _delay_for_event_type(self, event_type, delay_window): + # this serves to order processing correlated events (received within the window) + # the window and randomization still have the desired effect of skew across client instances + next_time = self._event_schedule_times.get(event_type, 0) + now = self._time.time() + if now <= next_time: + this_time = next_time + 0.01 + delay = this_time - now + else: + delay = random() * delay_window + this_time = now + delay + self._event_schedule_times[event_type] = this_time + return delay + def _handle_topology_change(self, event): change_type = event["change_type"] addr, port = event["address"] if change_type == "NEW_NODE" or change_type == "MOVED_NODE": if self._topology_event_refresh_window >= 0: - delay = random() * self._topology_event_refresh_window + 
delay = self._delay_for_event_type('topology_change', self._topology_event_refresh_window) self._cluster.scheduler.schedule_unique(delay, self.refresh_node_list_and_token_map) elif change_type == "REMOVED_NODE": host = self._cluster.metadata.get_host(addr) @@ -2517,7 +2533,7 @@ def _handle_status_change(self, event): addr, port = event["address"] host = self._cluster.metadata.get_host(addr) if change_type == "UP": - delay = 1 + random() * 0.5 # randomness to avoid thundering herd problem on events + delay = 1 + self._delay_for_event_type('status_change', 0.5) # randomness to avoid thundering herd problem on events if host is None: # this is the first time we've seen the node self._cluster.scheduler.schedule_unique(delay, self.refresh_node_list_and_token_map) @@ -2540,7 +2556,7 @@ def _handle_schema_change(self, event): usertype = event.get('type') function = event.get('function') aggregate = event.get('aggregate') - delay = random() * self._schema_event_refresh_window + delay = self._delay_for_event_type('schema_change', self._schema_event_refresh_window) self._cluster.scheduler.schedule_unique(delay, self.refresh_schema, keyspace, table, usertype, function, aggregate) def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wait_time=None): diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 6e98d51336..f977e0ffae 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -236,21 +236,16 @@ def aggregate_changed(self, keyspace, aggregate, aggregate_results): pass def table_changed(self, keyspace, table, cf_results, col_results, triggers_result): - try: - keyspace_meta = self.keyspaces[keyspace] - except KeyError: - # we're trying to update a table in a keyspace we don't know about - log.warn("Tried to update schema for table '%s' in unknown keyspace '%s'", - table, keyspace) - return - - if not cf_results: - # the table was removed - keyspace_meta._drop_table_metadata(table) - else: + if cf_results: assert len(cf_results) == 
1 + keyspace_meta = self.keyspaces[keyspace] table_meta = self._build_table_metadata(keyspace_meta, cf_results[0], {table: col_results}, {table: triggers_result}) keyspace_meta._add_table_metadata(table_meta) + else: + try: + self.keyspaces[keyspace]._drop_table_metadata(table) + except KeyError: + pass def _keyspace_added(self, ksname): if self.token_map: diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index 9c93f5afe4..1429531c2d 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -410,12 +410,12 @@ def test_handle_schema_change(self): } self.cluster.scheduler.reset_mock() self.control_connection._handle_schema_change(event) - self.cluster.scheduler.schedule_unique.assert_called_once_with(0.0, self.control_connection.refresh_schema, 'ks1', 'table1', None, None, None) + self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_schema, 'ks1', 'table1', None, None, None) self.cluster.scheduler.reset_mock() event['table'] = None self.control_connection._handle_schema_change(event) - self.cluster.scheduler.schedule_unique.assert_called_once_with(0.0, self.control_connection.refresh_schema, 'ks1', None, None, None, None) + self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_schema, 'ks1', None, None, None, None) def test_refresh_disabled(self): cluster = MockCluster() From fafb29d622d3baa79aef0cee96a5c8e03e27d3e8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 10 Jul 2015 09:52:48 -0500 Subject: [PATCH 1560/3726] Limit mock dependency to version 1.0.1 1.1.0 dropped Python 2.6 support, and at least one call assert function currently used by tests. 
--- setup.py | 2 +- test-requirements.txt | 2 +- tox.ini | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index e85d332d37..ea6f4f0514 100644 --- a/setup.py +++ b/setup.py @@ -216,7 +216,7 @@ def run_setup(extensions): keywords='cassandra,cql,orm', include_package_data=True, install_requires=dependencies, - tests_require=['nose', 'mock', 'PyYAML', 'pytz', 'sure'], + tests_require=['nose', 'mock<=1.0.1', 'PyYAML', 'pytz', 'sure'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', diff --git a/test-requirements.txt b/test-requirements.txt index a90f1ad5c5..cc7b3c2b2e 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -2,7 +2,7 @@ blist scales nose -mock +mock<=1.0.1 ccm>=2.0 unittest2 PyYAML diff --git a/tox.ini b/tox.ini index 23b2aee1f4..b6b0b519dc 100644 --- a/tox.ini +++ b/tox.ini @@ -3,7 +3,7 @@ envlist = py26,py27,pypy,py33,py34 [base] deps = nose - mock + mock<=1.0.1 PyYAML six From 870d76946b90de2fdec887ac238ecf71f1145c20 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 10 Jul 2015 09:52:48 -0500 Subject: [PATCH 1561/3726] Limit mock dependency to version 1.0.1 1.1.0 dropped Python 2.6 support, and at least one call assert function currently used by tests. 
--- setup.py | 2 +- test-requirements.txt | 2 +- tox.ini | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index e85d332d37..ea6f4f0514 100644 --- a/setup.py +++ b/setup.py @@ -216,7 +216,7 @@ def run_setup(extensions): keywords='cassandra,cql,orm', include_package_data=True, install_requires=dependencies, - tests_require=['nose', 'mock', 'PyYAML', 'pytz', 'sure'], + tests_require=['nose', 'mock<=1.0.1', 'PyYAML', 'pytz', 'sure'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', diff --git a/test-requirements.txt b/test-requirements.txt index a90f1ad5c5..cc7b3c2b2e 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -2,7 +2,7 @@ blist scales nose -mock +mock<=1.0.1 ccm>=2.0 unittest2 PyYAML diff --git a/tox.ini b/tox.ini index 23b2aee1f4..b6b0b519dc 100644 --- a/tox.ini +++ b/tox.ini @@ -3,7 +3,7 @@ envlist = py26,py27,pypy,py33,py34 [base] deps = nose - mock + mock<=1.0.1 PyYAML six From 1f0c6c312bce36d0892e18725baace3a0ae02a28 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 10 Jul 2015 09:52:48 -0500 Subject: [PATCH 1562/3726] Limit mock dependency to version 1.0.1 1.1.0 dropped Python 2.6 support, and at least one call assert function currently used by tests. 
--- setup.py | 2 +- test-requirements.txt | 2 +- tox.ini | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index e85d332d37..ea6f4f0514 100644 --- a/setup.py +++ b/setup.py @@ -216,7 +216,7 @@ def run_setup(extensions): keywords='cassandra,cql,orm', include_package_data=True, install_requires=dependencies, - tests_require=['nose', 'mock', 'PyYAML', 'pytz', 'sure'], + tests_require=['nose', 'mock<=1.0.1', 'PyYAML', 'pytz', 'sure'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', diff --git a/test-requirements.txt b/test-requirements.txt index a90f1ad5c5..cc7b3c2b2e 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -2,7 +2,7 @@ blist scales nose -mock +mock<=1.0.1 ccm>=2.0 unittest2 PyYAML diff --git a/tox.ini b/tox.ini index 23b2aee1f4..b6b0b519dc 100644 --- a/tox.ini +++ b/tox.ini @@ -3,7 +3,7 @@ envlist = py26,py27,pypy,py33,py34 [base] deps = nose - mock + mock<=1.0.1 PyYAML six From 6be945b6e9061ddf416970a398a7c2ae9355dced Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 10 Jul 2015 17:06:33 -0500 Subject: [PATCH 1563/3726] Initial version of generator-based concurrent executor not fully tested --- cassandra/concurrent.py | 86 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 80 insertions(+), 6 deletions(-) diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index b96c921bd2..81c6938178 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -24,7 +24,6 @@ log = logging.getLogger(__name__) - def execute_concurrent(session, statements_and_parameters, concurrency=100, raise_on_first_error=True): """ Executes a sequence of (statement, parameters) tuples concurrently. Each @@ -74,11 +73,6 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais if not statements_and_parameters: return [] - # TODO handle iterators and generators naturally without converting the - # whole thing to a list. 
This would require not building a result - # list of Nones up front (we don't know how many results there will be), - # so a dict keyed by index should be used instead. The tricky part is - # knowing when you're the final statement to finish. statements_and_parameters = list(statements_and_parameters) event = Event() @@ -101,6 +95,86 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais else: return results +from threading import Lock, Condition +from heapq import heappush, heappop + +class ConcurrentExecutor(object): + + def __init__(self, session, statements_and_params): + self.session = session + self._enum_statements = enumerate(iter(statements_and_params)) + self._lock = Lock() + self._condition = Condition(self._lock) + self._fail_fast = True + self._results_queue = [] + self._exec_count = 0 + + def execute(self, concurrency, fail_fast): + self._fail_fast = fail_fast + self._results_queue = [] + self._exec_count = 0 + with self._lock: + for n in xrange(concurrency): + if not self._execute_next(): + break + return self._results() + + def _execute_next(self): + # lock must be held + try: + (idx, (statement, params)) = next(self._enum_statements) + self._exec_count += 1 + self._execute(idx, statement, params) + return True + except StopIteration: + pass + + def _execute(self, idx, statement, params): + try: + future = self.session.execute_async(statement, params, timeout=None) + args = (idx,) + future.add_callbacks( + callback=self._on_success, callback_args=args, + errback=self._on_error, errback_args=args) + except Exception as exc: + e = sys.exc_info() if six.PY2 else exc + self._on_error(e, idx) + + def _on_success(self, result, idx): + self._put_result(idx, True, result) + + def _on_error(self, result, idx): + self._put_result(idx, False, result) + + def _put_result(self, idx, success, result): + with self._condition: + heappush(self._results_queue, (idx, (success, result))) + self._execute_next() + self._condition.notify() + + 
def _results(self): + # todo: done condition + current = 0 + with self._condition: + while current < self._exec_count: + while not self._results_queue or self._results_queue[0][0] != current: + self._condition.wait() + while self._results_queue and self._results_queue[0][0] == current: + _, res = heappop(self._results_queue) + if self._fail_fast and not res[0]: + self._raise(res[1]) + yield res + current += 1 + + @staticmethod + def _raise(exc): + if six.PY2 and isinstance(exc, tuple): + (exc_type, value, traceback) = exc + six.reraise(exc_type, value, traceback) + else: + raise exc + + def execute_concurrent_with_args(session, statement, parameters, *args, **kwargs): """ From 6eebdebd64de676188f8c158f6b620b8dba9cbad Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 13 Jul 2015 17:40:11 -0500 Subject: [PATCH 1564/3726] Another iteration on updated schema This version parses the new tables schema. API is a little messy -- I would like to revisit that. PYTHON-276 --- cassandra/metadata.py | 176 +++++++++++++++----- tests/integration/standard/test_metadata.py | 6 +- 2 files changed, 134 insertions(+), 48 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 6b177d7049..8bca0beed1 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -960,30 +960,6 @@ def primary_key(self): table. 
""" - recognized_options = ( - "comment", - "read_repair_chance", - "dclocal_read_repair_chance", # kept to be safe, but see _build_table_options() - "local_read_repair_chance", - "replicate_on_write", - "gc_grace_seconds", - "bloom_filter_fp_chance", - "caching", - "compaction_strategy_class", - "compaction_strategy_options", - "min_compaction_threshold", - "max_compaction_threshold", - "compression_parameters", - "min_index_interval", - "max_index_interval", - "index_interval", - "speculative_retry", - "rows_per_partition_to_cache", - "memtable_flush_period_in_ms", - "populate_io_cache_on_flush", - "compression", - "default_time_to_live") - compaction_options = { "min_compaction_threshold": "min_threshold", "max_compaction_threshold": "max_threshold", @@ -999,16 +975,19 @@ def is_cql_compatible(self): """ A boolean indicating if this table can be represented as CQL in export """ - # no such thing as DCT in CQL - incompatible = issubclass(self.comparator, types.DynamicCompositeType) + comparator = getattr(self, 'comparator', None) + if comparator: + # no such thing as DCT in CQL + incompatible = issubclass(self.comparator, types.DynamicCompositeType) - # no compact storage with more than one column beyond PK if there - # are clustering columns - incompatible |= (self.is_compact_storage and - len(self.columns) > len(self.primary_key) + 1 and - len(self.clustering_key) >= 1) + # no compact storage with more than one column beyond PK if there + # are clustering columns + incompatible |= (self.is_compact_storage and + len(self.columns) > len(self.primary_key) + 1 and + len(self.clustering_key) >= 1) - return not incompatible + return not incompatible + return True def __init__(self, keyspace_name, name, partition_key=None, clustering_key=None, columns=None, triggers=None, options=None): self.keyspace_name = keyspace_name @@ -1101,18 +1080,10 @@ def as_cql_query(self, formatted=False): if self.clustering_key: cluster_str = "CLUSTERING ORDER BY " - clustering_names = 
protect_names([c.name for c in self.clustering_key]) - - if self.is_compact_storage and \ - not issubclass(self.comparator, types.CompositeType): - subtypes = [self.comparator] - else: - subtypes = self.comparator.subtypes - inner = [] - for colname, coltype in zip(clustering_names, subtypes): - ordering = "DESC" if issubclass(coltype, types.ReversedType) else "ASC" - inner.append("%s %s" % (colname, ordering)) + for col in self.clustering_key: + ordering = "DESC" if issubclass(col.data_type, types.ReversedType) else "ASC" + inner.append("%s %s" % (protect_name(col.name), ordering)) cluster_str += "(%s)" % ", ".join(inner) option_strings.append(cluster_str) @@ -1545,6 +1516,30 @@ class SchemaParserV22(_SchemaParser): _table_name_col = 'columnfamily_name' + recognized_table_options = ( + "comment", + "read_repair_chance", + "dclocal_read_repair_chance", # kept to be safe, but see _build_table_options() + "local_read_repair_chance", + "replicate_on_write", + "gc_grace_seconds", + "bloom_filter_fp_chance", + "caching", + "compaction_strategy_class", + "compaction_strategy_options", + "min_compaction_threshold", + "max_compaction_threshold", + "compression_parameters", + "min_index_interval", + "max_index_interval", + "index_interval", + "speculative_retry", + "rows_per_partition_to_cache", + "memtable_flush_period_in_ms", + "populate_io_cache_on_flush", + "compression", + "default_time_to_live") + def __init__(self, connection, timeout): super(SchemaParserV22, self).__init__(connection, timeout) self.keyspaces_result = [] @@ -1809,10 +1804,9 @@ def _build_table_metadata(self, row, col_rows, trigger_rows): return table_meta - @staticmethod - def _build_table_options(row): + def _build_table_options(self, row): """ Setup the mostly-non-schema table options, like caching settings """ - options = dict((o, row.get(o)) for o in TableMetadata.recognized_options if o in row) + options = dict((o, row.get(o)) for o in self.recognized_table_options if o in row) # the option name 
when creating tables is "dclocal_read_repair_chance", # but the column name in system.schema_columnfamilies is @@ -1960,6 +1954,21 @@ class SchemaParserV3(SchemaParserV22): _table_name_col = 'table_name' + recognized_table_options = ( + 'bloom_filter_fp_chance', + 'caching', + 'comment', + 'compaction', + 'compression', + 'dclocal_read_repair_chance', + 'default_time_to_live', + 'gc_grace_seconds', + 'max_index_interval', + 'memtable_flush_period_in_ms', + 'min_index_interval', + 'read_repair_chance', + 'speculative_retry') + @staticmethod def _build_keyspace_metadata(row): name = row["keyspace_name"] @@ -1968,6 +1977,81 @@ def _build_keyspace_metadata(row): strategy_class = strategy_options.pop("class") return KeyspaceMetadata(name, durable_writes, strategy_class, strategy_options) + def _build_table_metadata(self, row, col_rows, trigger_rows): + keyspace_name = row["keyspace_name"] + table_name = row[self._table_name_col] + cf_col_rows = col_rows.get(table_name, []) + + if not cf_col_rows: # CASSANDRA-8487 + log.warning("Building table metadata with no column meta for %s.%s", + keyspace_name, table_name) + + table_meta = TableMetadataV3(keyspace_name, table_name) + + for col_row in cf_col_rows: + column_meta = self._build_column_metadata(table_meta, col_row) + table_meta.columns[column_meta.name] = column_meta + + # partition key + partition_rows = [r for r in cf_col_rows + if r.get('type', None) == "partition_key"] + if len(partition_rows) > 1: + partition_rows = sorted(partition_rows, key=lambda row: row.get('component_index')) + for r in partition_rows: + table_meta.partition_key.append(table_meta.columns[r.get('column_name')]) + + # clustering key + clustering_rows = [r for r in cf_col_rows + if r.get('type', None) == "clustering"] + if len(clustering_rows) > 1: + clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index')) + for r in clustering_rows: + table_meta.clustering_key.append(table_meta.columns[r.get('column_name')]) + + if 
trigger_rows: + for trigger_row in trigger_rows[table_name]: + trigger_meta = self._build_trigger_metadata(table_meta, trigger_row) + table_meta.triggers[trigger_meta.name] = trigger_meta + + table_meta.options = self._build_table_options(row) + flags = row.get('flags', set()) + if flags: + table_meta.is_compact_storage = 'dense' in flags or 'super' in flags or 'compound' not in flags + return table_meta + + def _build_table_options(self, row): + """ Setup the mostly-non-schema table options, like caching settings """ + return dict((o, row.get(o)) for o in self.recognized_table_options if o in row) + +class TableMetadataV3(TableMetadata): + """ + For now, until I figure out what to do with this API + """ + compaction_options = {} + + _option_maps = ['caching'] + option_maps = ['compaction', 'compression', 'caching'] + + @property + def is_cql_compatible(self): + return True + + def _make_option_strings(self): + ret = [] + options_copy = dict(self.options.items()) + + for option in self.option_maps: + value = options_copy.pop(option, {}) + params = ("'%s': '%s'" % (k, v) for k, v in value.items()) + ret.append("%s = {%s}" % (option, ', '.join(params))) + + for name, value in options_copy.items(): + if value is not None: + if name == "comment": + value = value or "" + ret.append("%s = %s" % (name, protect_value(value))) + + return list(sorted(ret)) def get_schema_parser(connection, timeout): server_version = connection.server_version diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 582977a95e..3152fdb311 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -28,7 +28,7 @@ from cassandra.cqltypes import DoubleType, Int32Type, ListType, UTF8Type, MapType from cassandra.encoder import Encoder from cassandra.metadata import (Metadata, KeyspaceMetadata, TableMetadata, IndexMetadata, - Token, MD5Token, TokenMap, murmur3, Function, Aggregate) + Token, 
MD5Token, TokenMap, murmur3, Function, Aggregate, get_schema_parser) from cassandra.policies import SimpleConvictionPolicy from cassandra.pool import Host @@ -132,8 +132,10 @@ def test_basic_table_meta_properties(self): self.assertEqual([], tablemeta.clustering_key) self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys())) + parser = get_schema_parser(self.cluster.control_connection._connection, 1) + for option in tablemeta.options: - self.assertIn(option, TableMetadata.recognized_options) + self.assertIn(option, parser.recognized_table_options) self.check_create_statement(tablemeta, create_statement) From dc24bf1d0ea23c5a6713f3c30988c09446a8fa01 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 14 Jul 2015 11:26:10 -0500 Subject: [PATCH 1565/3726] schema meta: make sure columns added in key order also modify table build based on compact static condition --- cassandra/metadata.py | 45 ++++++++++++++++++++++++++++++------------- 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 8bca0beed1..014390bdea 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1988,9 +1988,15 @@ def _build_table_metadata(self, row, col_rows, trigger_rows): table_meta = TableMetadataV3(keyspace_name, table_name) - for col_row in cf_col_rows: - column_meta = self._build_column_metadata(table_meta, col_row) - table_meta.columns[column_meta.name] = column_meta + table_meta.options = self._build_table_options(row) + flags = row.get('flags', set()) + if flags: + compact_static = False + table_meta.is_compact_storage = 'dense' in flags or 'super' in flags or 'compound' not in flags + else: + # example: create table t (a int, b int, c int, primary key((a,b))) with compact storage + compact_static = True + table_meta.is_compact_storage = True # partition key partition_rows = [r for r in cf_col_rows @@ -1998,25 +2004,38 @@ def _build_table_metadata(self, row, col_rows, trigger_rows): if 
len(partition_rows) > 1: partition_rows = sorted(partition_rows, key=lambda row: row.get('component_index')) for r in partition_rows: + # we have to add meta here (and not in the later loop) because TableMetadata.columns is an + # OrderedDict, and it assumes keys are inserted first, in order, when exporting CQL + column_meta = self._build_column_metadata(table_meta, r) + table_meta.columns[column_meta.name] = column_meta table_meta.partition_key.append(table_meta.columns[r.get('column_name')]) # clustering key - clustering_rows = [r for r in cf_col_rows - if r.get('type', None) == "clustering"] - if len(clustering_rows) > 1: - clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index')) - for r in clustering_rows: - table_meta.clustering_key.append(table_meta.columns[r.get('column_name')]) + if not compact_static: + clustering_rows = [r for r in cf_col_rows + if r.get('type', None) == "clustering"] + if len(clustering_rows) > 1: + clustering_rows = sorted(clustering_rows, key=lambda row: row.get('component_index')) + for r in clustering_rows: + column_meta = self._build_column_metadata(table_meta, r) + table_meta.columns[column_meta.name] = column_meta + table_meta.clustering_key.append(table_meta.columns[r.get('column_name')]) + + for col_row in (r for r in cf_col_rows + if r.get('type', None) not in ('parition_key', 'clustering_key')): + column_meta = self._build_column_metadata(table_meta, col_row) + if not compact_static or column_meta.is_static: + # for compact static tables, we omit the clustering key and value, and only add the logical columns. 
+ # They are marked not static so that it generates appropriate CQL + if compact_static: + column_meta.is_static = False + table_meta.columns[column_meta.name] = column_meta if trigger_rows: for trigger_row in trigger_rows[table_name]: trigger_meta = self._build_trigger_metadata(table_meta, trigger_row) table_meta.triggers[trigger_meta.name] = trigger_meta - table_meta.options = self._build_table_options(row) - flags = row.get('flags', set()) - if flags: - table_meta.is_compact_storage = 'dense' in flags or 'super' in flags or 'compound' not in flags return table_meta def _build_table_options(self, row): From 46222fa5b01d98dd7a3413a545f6aa2a3d65df5b Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 14 Jul 2015 16:13:50 -0500 Subject: [PATCH 1566/3726] Allow generator for inputs and outputs of execute_concurrent PYTHON-123 --- cassandra/concurrent.py | 236 ++++++++++++++++------------------------ 1 file changed, 91 insertions(+), 145 deletions(-) diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index 81c6938178..aba3045605 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -12,33 +12,24 @@ # See the License for the specific language governing permissions and # limitations under the License. + +from heapq import heappush, heappop +from itertools import cycle import six +from six.moves import xrange, zip +from threading import Condition import sys -from itertools import count, cycle -import logging -from six.moves import xrange -from threading import Event - from cassandra.cluster import PagedResult +import logging log = logging.getLogger(__name__) -def execute_concurrent(session, statements_and_parameters, concurrency=100, raise_on_first_error=True): +def execute_concurrent(session, statements_and_parameters, concurrency=100, raise_on_first_error=True, results_generator=False): """ Executes a sequence of (statement, parameters) tuples concurrently. Each ``parameters`` item must be a sequence or :const:`None`. 
- A sequence of ``(success, result_or_exc)`` tuples is returned in the same - order that the statements were passed in. If ``success`` is :const:`False`, - there was an error executing the statement, and ``result_or_exc`` will be - an :class:`Exception`. If ``success`` is :const:`True`, ``result_or_exc`` - will be the query result. - - If `raise_on_first_error` is left as :const:`True`, execution will stop - after the first failed statement and the corresponding exception will be - raised. - The `concurrency` parameter controls how many statements will be executed concurrently. When :attr:`.Cluster.protocol_version` is set to 1 or 2, it is recommended that this be kept below 100 times the number of @@ -48,6 +39,26 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais substantially impacting throughput. If :attr:`~.Cluster.protocol_version` is 3 or higher, you can safely experiment with higher levels of concurrency. + If `raise_on_first_error` is left as :const:`True`, execution will stop + after the first failed statement and the corresponding exception will be + raised. + + `results_generator` controls how the results are returned. + + If :const:`False`, the results are returned only after all requests have completed. + + If :const:`True`, a generator expression is returned. Using a generator results in + a constrained memory footprint when the results set will be large -- results are yielded + as they return instead of materializing the entire list at once. The trade for lower memory + footprint is marginal CPU overhead (more thread coordination and sorting out-of-order results + on-the-fly). + + A sequence of ``(success, result_or_exc)`` tuples is returned in the same + order that the statements were passed in. If ``success`` is :const:`False`, + there was an error executing the statement, and ``result_or_exc`` will be + an :class:`Exception`. If ``success`` is :const:`True`, ``result_or_exc`` + will be the query result. 
+ Example usage:: select_statement = session.prepare("SELECT * FROM users WHERE id=?") @@ -73,47 +84,27 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais if not statements_and_parameters: return [] - statements_and_parameters = list(statements_and_parameters) + executor = ConcurrentExecutorGenResults(session, statements_and_parameters) if results_generator else ConcurrentExecutorListResults(session, statements_and_parameters) + return executor.execute(concurrency, raise_on_first_error) - event = Event() - first_error = [] if raise_on_first_error else None - to_execute = len(statements_and_parameters) - results = [None] * to_execute - num_finished = count(1) - statements = enumerate(iter(statements_and_parameters)) - for i in xrange(min(concurrency, len(statements_and_parameters))): - _execute_next(_sentinel, i, event, session, statements, results, None, num_finished, to_execute, first_error) - event.wait() - if first_error: - exc = first_error[0] - if six.PY2 and isinstance(exc, tuple): - (exc_type, value, traceback) = exc - six.reraise(exc_type, value, traceback) - else: - raise exc - else: - return results - -from threading import Lock, Condition -from heapq import heappush, heappop - -class ConcurrentExecutor(object): +class _ConcurrentExecutor(object): def __init__(self, session, statements_and_params): self.session = session self._enum_statements = enumerate(iter(statements_and_params)) - self._lock = Lock() - self._condition = Condition(self._lock) - self._fail_fast = True + self._condition = Condition() + self._fail_fast = False self._results_queue = [] + self._current = 0 self._exec_count = 0 def execute(self, concurrency, fail_fast): self._fail_fast = fail_fast self._results_queue = [] + self._current = 0 self._exec_count = 0 - with self._lock: + with self._condition: for n in xrange(concurrency): if not self._execute_next(): break @@ -132,47 +123,81 @@ def _execute_next(self): def _execute(self, idx, statement, params): 
try: future = self.session.execute_async(statement, params, timeout=None) - args = (idx,) + args = (future, idx) future.add_callbacks( callback=self._on_success, callback_args=args, errback=self._on_error, errback_args=args) except Exception as exc: - e = sys.exc_info() if six.PY2 else exc - self._on_error(e, idx) + # exc_info with fail_fast to preserve stack trace info when raising on the client thread + # (matches previous behavior -- not sure why we wouldn't want stack trace in the other case) + e = sys.exc_info() if self._fail_fast and six.PY2 else exc + self._put_result(e, idx, False) - def _on_success(self, result, idx): - self._put_result(idx, True, result) + def _on_success(self, result, future, idx): + if future.has_more_pages: + result = PagedResult(future, result) + future.clear_callbacks() + self._put_result(result, idx, True) + + def _on_error(self, result, future, idx): + self._put_result(result, idx, False) - def _on_error(self, result, idx): - self._put_result(idx, False, result) + @staticmethod + def _raise(exc): + if six.PY2 and isinstance(exc, tuple): + (exc_type, value, traceback) = exc + six.reraise(exc_type, value, traceback) + else: + raise exc - def _put_result(self, idx, success, result): + +class ConcurrentExecutorGenResults(_ConcurrentExecutor): + + def _put_result(self, result, idx, success): with self._condition: heappush(self._results_queue, (idx, (success, result))) self._execute_next() self._condition.notify() def _results(self): - # todo: done condition - current = 0 with self._condition: - while current < self._exec_count: - while not self._results_queue or self._results_queue[0][0] != current: + while self._current < self._exec_count: + while not self._results_queue or self._results_queue[0][0] != self._current: self._condition.wait() - while self._results_queue and self._results_queue[0][0] == current: + while self._results_queue and self._results_queue[0][0] == self._current: _, res = heappop(self._results_queue) if 
self._fail_fast and not res[0]: self._raise(res[1]) yield res - current += 1 + self._current += 1 - @staticmethod - def _raise(exc): - if six.PY2 and isinstance(exc, tuple): - (exc_type, value, traceback) = exc - six.reraise(exc_type, value, traceback) - else: - raise exc + +class ConcurrentExecutorListResults(_ConcurrentExecutor): + + _exception = None + + def execute(self, concurrency, fail_fast): + self._exception = None + return super(ConcurrentExecutorListResults, self).execute(concurrency, fail_fast) + + def _put_result(self, result, idx, success): + self._results_queue.append((idx, (success, result))) + with self._condition: + self._current += 1 + if not success and self._fail_fast: + if not self._exception: + self._exception = result + self._condition.notify() + elif not self._execute_next() and self._current == self._exec_count: + self._condition.notify() + + def _results(self): + with self._condition: + while self._current < self._exec_count: + self._condition.wait() + if self._exception and self._fail_fast: + self._raise(self._exception) + return [r[1] for r in sorted(self._results_queue)] @@ -188,83 +213,4 @@ def execute_concurrent_with_args(session, statement, parameters, *args, **kwargs parameters = [(x,) for x in range(1000)] execute_concurrent_with_args(session, statement, parameters, concurrency=50) """ - return execute_concurrent(session, list(zip(cycle((statement,)), parameters)), *args, **kwargs) - - -_sentinel = object() - - -def _handle_error(error, result_index, event, session, statements, results, - future, num_finished, to_execute, first_error): - if first_error is not None: - first_error.append(error) - event.set() - return - else: - results[result_index] = (False, error) - if next(num_finished) >= to_execute: - event.set() - return - - try: - (next_index, (statement, params)) = next(statements) - except StopIteration: - return - - try: - future = session.execute_async(statement, params, timeout=None) - args = (next_index, event, session, 
statements, results, future, num_finished, to_execute, first_error) - future.add_callbacks( - callback=_execute_next, callback_args=args, - errback=_handle_error, errback_args=args) - except Exception as exc: - if first_error is not None: - if six.PY2: - first_error.append(sys.exc_info()) - else: - first_error.append(exc) - event.set() - return - else: - results[next_index] = (False, exc) - if next(num_finished) >= to_execute: - event.set() - return - - -def _execute_next(result, result_index, event, session, statements, results, - future, num_finished, to_execute, first_error): - if result is not _sentinel: - if future.has_more_pages: - result = PagedResult(future, result) - future.clear_callbacks() - results[result_index] = (True, result) - finished = next(num_finished) - if finished >= to_execute: - event.set() - return - - try: - (next_index, (statement, params)) = next(statements) - except StopIteration: - return - - try: - future = session.execute_async(statement, params, timeout=None) - args = (next_index, event, session, statements, results, future, num_finished, to_execute, first_error) - future.add_callbacks( - callback=_execute_next, callback_args=args, - errback=_handle_error, errback_args=args) - except Exception as exc: - if first_error is not None: - if six.PY2: - first_error.append(sys.exc_info()) - else: - first_error.append(exc) - event.set() - return - else: - results[next_index] = (False, exc) - if next(num_finished) >= to_execute: - event.set() - return + return execute_concurrent(session, zip(cycle((statement,)), parameters), *args, **kwargs) From f7c0bd312df6ac105173969c8b57ee4b8bd9c664 Mon Sep 17 00:00:00 2001 From: John Anderson Date: Thu, 25 Jun 2015 10:59:57 -0700 Subject: [PATCH 1567/3726] Enable C extensions on pypy and add pypy3 support Conflicts: tox.ini --- cassandra/murmur3.c | 10 +++++++++- setup.py | 4 ++-- tox.ini | 11 +++++++++-- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/cassandra/murmur3.c 
b/cassandra/murmur3.c index 657c01f69d..dc98b4919d 100644 --- a/cassandra/murmur3.c +++ b/cassandra/murmur3.c @@ -20,6 +20,13 @@ typedef int Py_ssize_t; #define PY_SSIZE_T_MIN INT_MIN #endif +#ifdef PYPY_VERSION +#define COMPILING_IN_PYPY 1 +#define COMPILING_IN_CPYTHON 0 +#else +#define COMPILING_IN_PYPY 0 +#define COMPILING_IN_CPYTHON 1 +#endif //----------------------------------------------------------------------------- // Platform-specific functions and macros @@ -179,7 +186,8 @@ struct module_state { PyObject *error; }; -#if PY_MAJOR_VERSION >= 3 +// pypy3 doesn't have GetState yet. +#if COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) #else #define GETSTATE(m) (&_state) diff --git a/setup.py b/setup.py index 54527d1a49..bf12426231 100644 --- a/setup.py +++ b/setup.py @@ -235,6 +235,7 @@ def run_setup(extensions): **kw) is_windows = os.name == 'nt' + if is_windows: build_extensions.error_message = """ =============================================================================== @@ -291,8 +292,7 @@ def run_setup(extensions): if extensions: if (sys.platform.startswith("java") - or sys.platform == "cli" - or "PyPy" in sys.version): + or sys.platform == "cli"): sys.stderr.write(platform_unsupported_msg) extensions = () elif sys.byteorder == "big": diff --git a/tox.ini b/tox.ini index 433e254004..b1f99087ad 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py{26,27,33,34},pypy +envlist = py{26,27,33,34},pypy,pypy3 [base] deps = nose @@ -20,6 +20,13 @@ commands = nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/ [testenv:pypy] deps = {[base]deps} -commands = nosetests --verbosity=2 {toxinidir}/tests/unit/ +commands = {envpython} setup.py build_ext --inplace + nosetests --verbosity=2 tests/unit/ + +[testenv:pypy3] +deps = {[base]deps} +commands = {envpython} setup.py build_ext --inplace + nosetests --verbosity=2 tests/unit/ + # cqlengine/test_timestamp.py uses sure, 
which fails in pypy presently # could remove sure usage From e08fefc9822d7d8594262e2735fa79e80d6b59f7 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 15 Jul 2015 14:48:43 -0500 Subject: [PATCH 1568/3726] Make tests.unit.io.test_twistedreactor work in Python3 --- tests/unit/io/test_twistedreactor.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index d2142b09ca..9063460931 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -153,17 +153,17 @@ def test_handle_read__incomplete(self): Verify that handle_read() processes incomplete messages properly. """ self.obj_ut.process_msg = Mock() - self.assertEqual(self.obj_ut._iobuf.getvalue(), '') # buf starts empty + self.assertEqual(self.obj_ut._iobuf.getvalue(), b'') # buf starts empty # incomplete header - self.obj_ut._iobuf.write('\x84\x00\x00\x00\x00') + self.obj_ut._iobuf.write(b'\x84\x00\x00\x00\x00') self.obj_ut.handle_read() - self.assertEqual(self.obj_ut._iobuf.getvalue(), '\x84\x00\x00\x00\x00') + self.assertEqual(self.obj_ut._iobuf.getvalue(), b'\x84\x00\x00\x00\x00') # full header, but incomplete body - self.obj_ut._iobuf.write('\x00\x00\x00\x15') + self.obj_ut._iobuf.write(b'\x00\x00\x00\x15') self.obj_ut.handle_read() self.assertEqual(self.obj_ut._iobuf.getvalue(), - '\x84\x00\x00\x00\x00\x00\x00\x00\x15') + b'\x84\x00\x00\x00\x00\x00\x00\x00\x15') self.assertEqual(self.obj_ut._current_frame.end_pos, 30) # verify we never attempted to process the incomplete message @@ -174,14 +174,14 @@ def test_handle_read__fullmessage(self): Verify that handle_read() processes complete messages properly. 
""" self.obj_ut.process_msg = Mock() - self.assertEqual(self.obj_ut._iobuf.getvalue(), '') # buf starts empty + self.assertEqual(self.obj_ut._iobuf.getvalue(), b'') # buf starts empty # write a complete message, plus 'NEXT' (to simulate next message) # assumes protocol v3+ as default Connection.protocol_version - body = 'this is the drum roll' - extra = 'NEXT' + body = b'this is the drum roll' + extra = b'NEXT' self.obj_ut._iobuf.write( - '\x84\x01\x00\x02\x03\x00\x00\x00\x15' + body + extra) + b'\x84\x01\x00\x02\x03\x00\x00\x00\x15' + body + extra) self.obj_ut.handle_read() self.assertEqual(self.obj_ut._iobuf.getvalue(), extra) self.obj_ut.process_msg.assert_called_with( From cf7e6683d3c036b70e17de9fa187ec8e01bf7e6e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 15 Jul 2015 15:15:08 -0500 Subject: [PATCH 1569/3726] Whitelist murmur3 extension for PyPy runtime --- setup.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/setup.py b/setup.py index bf12426231..d89a5ca52b 100644 --- a/setup.py +++ b/setup.py @@ -268,7 +268,7 @@ def run_setup(extensions): [Extension('cassandra.%s' % m, ['cassandra/%s.py' % m], extra_compile_args=compile_args) for m in cython_candidates], exclude_failures=True)) except ImportError: - print("Cython is not installed. Not compiling core driver files as extensions (optional).") + sys.stderr.write("Cython is not installed. Not compiling core driver files as extensions (optional).") if "--no-extensions" in sys.argv: extensions = [] @@ -290,14 +290,25 @@ def run_setup(extensions): =============================================================================== """ +pypy_unsupported_msg = \ +""" +================================================================================= +Some optional C extensions are not supported in PyPy. Only murmur3 will be built. 
+================================================================================= +""" + if extensions: - if (sys.platform.startswith("java") - or sys.platform == "cli"): + if "PyPy" in sys.version: + sys.stderr.write(pypy_unsupported_msg) + extensions = [ext for ext in extensions if ext is murmur3_ext] + + if (sys.platform.startswith("java") or sys.platform == "cli"): sys.stderr.write(platform_unsupported_msg) - extensions = () + extensions = [] elif sys.byteorder == "big": sys.stderr.write(arch_unsupported_msg) - extensions = () + extensions = [] + while True: # try to build as many of the extensions as we can From 5391a206343deb129db34a252348f56f8c45a451 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 10 Jul 2015 09:52:48 -0500 Subject: [PATCH 1570/3726] Limit mock dependency to version 1.0.1 1.1.0 dropped Python 2.6 support, and at least one call assert function currently used by tests. --- setup.py | 2 +- test-requirements.txt | 2 +- tox.ini | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index d89a5ca52b..37899c2e01 100644 --- a/setup.py +++ b/setup.py @@ -216,7 +216,7 @@ def run_setup(extensions): keywords='cassandra,cql,orm', include_package_data=True, install_requires=dependencies, - tests_require=['nose', 'mock', 'PyYAML', 'pytz', 'sure'], + tests_require=['nose', 'mock<=1.0.1', 'PyYAML', 'pytz', 'sure'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', diff --git a/test-requirements.txt b/test-requirements.txt index a90f1ad5c5..cc7b3c2b2e 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -2,7 +2,7 @@ blist scales nose -mock +mock<=1.0.1 ccm>=2.0 unittest2 PyYAML diff --git a/tox.ini b/tox.ini index b1f99087ad..d1ffc5456d 100644 --- a/tox.ini +++ b/tox.ini @@ -3,7 +3,7 @@ envlist = py{26,27,33,34},pypy,pypy3 [base] deps = nose - mock + mock<=1.0.1 PyYAML six From be47fce1c13a2c574a07bdef5dcead2d966acac7 Mon Sep 17 00:00:00 2001 From: Adam 
Holmberg Date: Wed, 15 Jul 2015 15:29:23 -0500 Subject: [PATCH 1571/3726] Make tox pypy commands use absolute path to unit tests follows from new setdir --- tox.ini | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tox.ini b/tox.ini index d1ffc5456d..5b5832af0c 100644 --- a/tox.ini +++ b/tox.ini @@ -20,13 +20,11 @@ commands = nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/ [testenv:pypy] deps = {[base]deps} -commands = {envpython} setup.py build_ext --inplace - nosetests --verbosity=2 tests/unit/ +commands = nosetests --verbosity=2 {toxinidir}/tests/unit/ [testenv:pypy3] deps = {[base]deps} -commands = {envpython} setup.py build_ext --inplace - nosetests --verbosity=2 tests/unit/ +commands = nosetests --verbosity=2 {toxinidir}/tests/unit/ # cqlengine/test_timestamp.py uses sure, which fails in pypy presently # could remove sure usage From 0d9cfbd77c2035ce091b032362ab33121cb938d1 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 15 Jul 2015 16:47:55 -0500 Subject: [PATCH 1572/3726] Removed unused cassandra.protocol._message_types_by_name --- cassandra/protocol.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 0aa9ae4846..0dbd513e65 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -64,7 +64,6 @@ class InternalError(Exception): CUSTOM_PAYLOAD_FLAG = 0x04 WARNING_FLAG = 0x08 -_message_types_by_name = {} _message_types_by_opcode = {} _UNSET_VALUE = object() @@ -72,7 +71,6 @@ class InternalError(Exception): class _RegisterMessageType(type): def __init__(cls, name, bases, dct): if not name.startswith('_'): - _message_types_by_name[cls.name] = cls _message_types_by_opcode[cls.opcode] = cls From d5b7f9264295dd16345e09e843f2ca1fdb4428b5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 16 Jul 2015 11:16:17 -0500 Subject: [PATCH 1573/3726] Add pypy3 config to .travis.yml --- .travis.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff 
--git a/.travis.yml b/.travis.yml index 3aa6228b92..dea9855234 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,15 +7,17 @@ env: - TOX_ENV=py27 CASS_VER=20 - TOX_ENV=py27 CASS_VER=21 - TOX_ENV=py27 CASS_VER=21 CYTHON_DEP=cython - - TOX_ENV=pypy CASS_VER=21 - TOX_ENV=py33 CASS_VER=21 - TOX_ENV=py33 CASS_VER=21 CYTHON_DEP=cython - TOX_ENV=py34 CASS_VER=21 - TOX_ENV=py34 CASS_VER=21 CYTHON_DEP=cython + - TOX_ENV=pypy CASS_VER=21 + - TOX_ENV=pypy3 CASS_VER=21 before_install: - sudo apt-get update -y - sudo apt-get install -y build-essential python-dev + - sudo apt-get install -y pypy-dev - sudo apt-get install -y libev4 libev-dev - sudo echo "deb http://www.apache.org/dist/cassandra/debian ${CASS_VER}x main" | sudo tee -a /etc/apt/sources.list - sudo echo "deb-src http://www.apache.org/dist/cassandra/debian ${CASS_VER}x main" | sudo tee -a /etc/apt/sources.list From ef07a9ec4826cc950574d496fa65d5b4488d068a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 16 Jul 2015 11:48:48 -0500 Subject: [PATCH 1574/3726] Type code "enum" in protocol --- cassandra/protocol.py | 58 ++++++++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 28 deletions(-) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 0dbd513e65..41439334d9 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -531,6 +531,34 @@ def send_body(self, f, protocol_version): RESULT_KIND_PREPARED = 0x0004 RESULT_KIND_SCHEMA_CHANGE = 0x0005 +class CassandraTypeCodes(object): + CUSTOM_TYPE = 0x0000 + AsciiType = 0x0001 + LongType = 0x0002 + BytesType = 0x0003 + BooleanType = 0x0004 + CounterColumnType = 0x0005 + DecimalType = 0x0006 + DoubleType = 0x0007 + FloatType = 0x0008 + Int32Type = 0x0009 + UTF8Type = 0x000A + DateType = 0x000B + UUIDType = 0x000C + UTF8Type = 0x000D + IntegerType = 0x000E + TimeUUIDType = 0x000F + InetAddressType = 0x0010 + SimpleDateType = 0x0011 + TimeType = 0x0012 + ShortType = 0x0013 + ByteType = 0x0014 + ListType = 0x0020 + MapType = 0x0021 + 
SetType = 0x0022 + UserType = 0x0030 + TupleType = 0x0031 + class ResultMessage(_MessageType): opcode = 0x08 @@ -540,34 +568,8 @@ class ResultMessage(_MessageType): results = None paging_state = None - type_codes = { - 0x0000: CUSTOM_TYPE, - 0x0001: AsciiType, - 0x0002: LongType, - 0x0003: BytesType, - 0x0004: BooleanType, - 0x0005: CounterColumnType, - 0x0006: DecimalType, - 0x0007: DoubleType, - 0x0008: FloatType, - 0x0009: Int32Type, - 0x000A: UTF8Type, - 0x000B: DateType, - 0x000C: UUIDType, - 0x000D: UTF8Type, - 0x000E: IntegerType, - 0x000F: TimeUUIDType, - 0x0010: InetAddressType, - 0x0011: SimpleDateType, - 0x0012: TimeType, - 0x0013: ShortType, - 0x0014: ByteType, - 0x0020: ListType, - 0x0021: MapType, - 0x0022: SetType, - 0x0030: UserType, - 0x0031: TupleType, - } + # Names match type name in module scope. Most are imported from cassandra.cqltypes (except CUSTOM_TYPE) + type_codes = _cqltypes_by_code = dict((v, globals()[k]) for k, v in CassandraTypeCodes.__dict__.items() if not k.startswith('_')) _FLAGS_GLOBAL_TABLES_SPEC = 0x0001 _HAS_MORE_PAGES_FLAG = 0x0002 From c9fb54ada1df6a7324c858818182ad04b542fb0a Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 16 Jul 2015 11:49:58 -0500 Subject: [PATCH 1575/3726] Deeply nested tuples can cause a stack overflow on the C* when using small stack sizes. This manifested it's self with Java 8. Reducing the depth of nested tuples in the test to ensure we don't run into this problem. 
--- tests/integration/standard/test_types.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index f5c148ddd4..7321ef1e00 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -627,13 +627,13 @@ def test_can_insert_nested_tuples(self): "v_1 frozen<%s>," "v_2 frozen<%s>," "v_3 frozen<%s>," - "v_128 frozen<%s>" + "v_32 frozen<%s>" ")" % (self.nested_tuples_schema_helper(1), self.nested_tuples_schema_helper(2), self.nested_tuples_schema_helper(3), - self.nested_tuples_schema_helper(128))) + self.nested_tuples_schema_helper(32))) - for i in (1, 2, 3, 128): + for i in (1, 2, 3, 32): # create tuple created_tuple = self.nested_tuples_creator_helper(i) From 7dcabd744a419dd9b3b9345d3854373e0ade37e6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 16 Jul 2015 14:35:21 -0500 Subject: [PATCH 1576/3726] Use real locks, not mocks, when dealing with threads in unit tests --- tests/unit/test_host_connection_pool.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/unit/test_host_connection_pool.py b/tests/unit/test_host_connection_pool.py index 7a351afb2e..fec889e922 100644 --- a/tests/unit/test_host_connection_pool.py +++ b/tests/unit/test_host_connection_pool.py @@ -18,7 +18,7 @@ import unittest # noqa from mock import Mock, NonCallableMagicMock -from threading import Thread, Event +from threading import Thread, Event, Lock from cassandra.cluster import Session from cassandra.connection import Connection @@ -74,7 +74,7 @@ def test_failed_wait_for_connection(self): def test_successful_wait_for_connection(self): host = Mock(spec=Host, address='ip1') session = self.make_session() - conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100) + conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, 
max_request_id=100, lock=Lock()) session.cluster.connection_factory.return_value = conn pool = HostConnectionPool(host, HostDistance.LOCAL, session) @@ -98,7 +98,7 @@ def get_second_conn(): def test_all_connections_trashed(self): host = Mock(spec=Host, address='ip1') session = self.make_session() - conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100) + conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100, lock=Lock()) session.cluster.connection_factory.return_value = conn session.cluster.get_core_connections_per_host.return_value = 1 From ab9f690707b6a1a76dbd6521e907bfdf7283d8c4 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 16 Jul 2015 15:13:19 -0500 Subject: [PATCH 1577/3726] Isolate custom protocol handling to client requests. --- cassandra/cluster.py | 38 +++++++++++++++------------- cassandra/connection.py | 33 ++++++++++++------------ cassandra/io/asyncorereactor.py | 4 +-- cassandra/io/eventletreactor.py | 2 +- cassandra/io/geventreactor.py | 2 +- cassandra/io/libevreactor.py | 2 +- cassandra/io/twistedreactor.py | 6 ++--- tests/unit/io/test_twistedreactor.py | 4 +-- tests/unit/test_connection.py | 16 ++++++------ tests/unit/test_response_future.py | 12 ++++----- 10 files changed, 61 insertions(+), 58 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c26e10f1a5..5f25e1781c 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -420,15 +420,6 @@ def auth_provider(self, value): GeventConnection will be used automatically. """ - protocol_handler_class = ProtocolHandler - """ - Specifies a protocol handler class, which can be used to override or extend features - such as message or type deserialization. 
- - The class must conform to the public classmethod interface defined in the default - implementation, :class:`cassandra.protocol.ProtocolHandler` - """ - control_connection_timeout = 2.0 """ A timeout, in seconds, for queries made by the control connection, such @@ -525,8 +516,7 @@ def __init__(self, idle_heartbeat_interval=30, schema_event_refresh_window=2, topology_event_refresh_window=10, - connect_timeout=5, - protocol_handler_class=None): + connect_timeout=5): """ Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. @@ -570,9 +560,6 @@ def __init__(self, if connection_class is not None: self.connection_class = connection_class - if protocol_handler_class is not None: - self.protocol_handler_class = protocol_handler_class - self.metrics_enabled = metrics_enabled self.ssl_options = ssl_options self.sockopts = sockopts @@ -812,7 +799,6 @@ def _make_connection_kwargs(self, address, kwargs_dict): kwargs_dict.setdefault('cql_version', self.cql_version) kwargs_dict.setdefault('protocol_version', self.protocol_version) kwargs_dict.setdefault('user_type_map', self._user_types) - kwargs_dict.setdefault('protocol_handler_class', self.protocol_handler_class) return kwargs_dict @@ -1372,7 +1358,7 @@ def _prepare_all_queries(self, host): log.debug("Preparing all known prepared statements against host %s", host) connection = None try: - connection = self.connection_factory(host.address, protocol_handler_class=ProtocolHandler) + connection = self.connection_factory(host.address) try: self.control_connection.wait_for_schema_agreement(connection) except Exception: @@ -1535,6 +1521,20 @@ class Session(object): .. versionadded:: 2.1.0 """ + client_protocol_handler = ProtocolHandler + """ + Specifies a protocol handler that will be used for client-initiated requests (i.e. no + internal driver requests). This can be used to override or extend features such as + message or type ser/des. 
+ + The class must conform to the public classmethod interface defined in the default + implementation, :class:`cassandra.protocol.ProtocolHandler` + + This is not included in published documentation as it is not intended for the casual user. + It requires knowledge of the native protocol and driver internals. Only advanced, specialized + use cases should need to do anything with this. + """ + _lock = None _pools = None _load_balancer = None @@ -1661,6 +1661,7 @@ def execute_async(self, query, parameters=None, trace=False, custom_payload=None timeout = self.default_timeout future = self._create_response_future(query, parameters, trace, custom_payload, timeout) + future._protocol_handler = self.client_protocol_handler future.send_request() return future @@ -2131,7 +2132,7 @@ def _try_connect(self, host): while True: try: - connection = self._cluster.connection_factory(host.address, is_control_connection=True, protocol_handler_class=ProtocolHandler) + connection = self._cluster.connection_factory(host.address, is_control_connection=True) break except ProtocolVersionUnsupported as e: self._cluster.protocol_downgrade(host.address, e.startup_version) @@ -2846,6 +2847,7 @@ class ResponseFuture(object): _custom_payload = None _warnings = None _timer = None + _protocol_handler = ProtocolHandler def __init__(self, session, message, query, timeout, metrics=None, prepared_statement=None): self.session = session @@ -2919,7 +2921,7 @@ def _query(self, host, message=None, cb=None): # TODO get connectTimeout from cluster settings connection, request_id = pool.borrow_connection(timeout=2.0) self._connection = connection - connection.send_msg(message, request_id, cb=cb) + connection.send_msg(message, request_id, cb=cb, encoder=self._protocol_handler.encode_message, decoder=self._protocol_handler.decode_message) return request_id except NoConnectionsAvailable as exc: log.debug("All connections for host %s are at capacity, moving to the next host", host) diff --git 
a/cassandra/connection.py b/cassandra/connection.py index d1e3a9c97a..5db88985e3 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -209,7 +209,7 @@ class Connection(object): def __init__(self, host='127.0.0.1', port=9042, authenticator=None, ssl_options=None, sockopts=None, compression=True, cql_version=None, protocol_version=MAX_SUPPORTED_VERSION, is_control_connection=False, - user_type_map=None, protocol_handler_class=ProtocolHandler): + user_type_map=None): self.host = host self.port = port self.authenticator = authenticator @@ -220,10 +220,8 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self.protocol_version = protocol_version self.is_control_connection = is_control_connection self.user_type_map = user_type_map - self.decoder = protocol_handler_class.decode_message - self.encoder = protocol_handler_class.encode_message self._push_watchers = defaultdict(set) - self._callbacks = {} + self._requests = {} self._iobuf = io.BytesIO() if protocol_version >= 3: @@ -320,20 +318,20 @@ def defunct(self, exc): self.last_error = exc self.close() - self.error_all_callbacks(exc) + self.error_all_requests(exc) self.connected_event.set() return exc - def error_all_callbacks(self, exc): + def error_all_requests(self, exc): with self.lock: - callbacks = self._callbacks - self._callbacks = {} + requests = self._requests + self._requests = {} new_exc = ConnectionShutdown(str(exc)) - for cb in callbacks.values(): + for cb, _ in requests.values(): try: cb(new_exc) except Exception: - log.warning("Ignoring unhandled exception while erroring callbacks for a " + log.warning("Ignoring unhandled exception while erroring requests for a " "failed connection (%s) to host %s:", id(self), self.host, exc_info=True) @@ -357,14 +355,16 @@ def handle_pushed(self, response): except Exception: log.exception("Pushed event handler errored, ignoring:") - def send_msg(self, msg, request_id, cb): + def send_msg(self, msg, request_id, cb, 
encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message): if self.is_defunct: raise ConnectionShutdown("Connection to %s is defunct" % self.host) elif self.is_closed: raise ConnectionShutdown("Connection to %s is closed" % self.host) - self._callbacks[request_id] = cb - self.push(self.encoder(msg, request_id, self.protocol_version, compressor=self.compressor)) + # queue the decoder function with the request + # this allows us to inject custom functions per request to encode, decode messages + self._requests[request_id] = (cb, decoder) + self.push(encoder(msg, request_id, self.protocol_version, compressor=self.compressor)) return request_id def wait_for_response(self, msg, timeout=None): @@ -492,16 +492,17 @@ def process_msg(self, header, body): stream_id = header.stream if stream_id < 0: callback = None + decoder = ProtocolHandler.decode_message else: - callback = self._callbacks.pop(stream_id, None) + callback, decoder = self._requests.pop(stream_id, None) with self.lock: self.request_ids.append(stream_id) self.msg_received = True try: - response = self.decoder(header.version, self.user_type_map, stream_id, - header.flags, header.opcode, body, self.decompressor) + response = decoder(header.version, self.user_type_map, stream_id, + header.flags, header.opcode, body, self.decompressor) except Exception as exc: log.exception("Error decoding response from Cassandra. 
" "opcode: %04x; message contents: %r", header.opcode, body) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index fae87a7354..dc9d26c6c9 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -183,7 +183,7 @@ def close(self): log.debug("Closed socket to %s", self.host) if not self.is_defunct: - self.error_all_callbacks( + self.error_all_requests( ConnectionShutdown("Connection to %s was closed" % self.host)) # don't leave in-progress operations hanging self.connected_event.set() @@ -239,7 +239,7 @@ def handle_read(self): if self._iobuf.tell(): self.process_io_buffer() - if not self._callbacks and not self.is_control_connection: + if not self._requests and not self.is_control_connection: self._readable = False def push(self, data): diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index b0206f43d2..aa90cc9abd 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -116,7 +116,7 @@ def close(self): log.debug("Closed socket to %s" % (self.host,)) if not self.is_defunct: - self.error_all_callbacks( + self.error_all_requests( ConnectionShutdown("Connection to %s was closed" % self.host)) # don't leave in-progress operations hanging self.connected_event.set() diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index 60a3f2d031..f26e61523c 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -106,7 +106,7 @@ def close(self): log.debug("Closed socket to %s" % (self.host,)) if not self.is_defunct: - self.error_all_callbacks( + self.error_all_requests( ConnectionShutdown("Connection to %s was closed" % self.host)) # don't leave in-progress operations hanging self.connected_event.set() diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 9114af133b..6b2036e2ee 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -290,7 +290,7 @@ def close(self): # don't 
leave in-progress operations hanging if not self.is_defunct: - self.error_all_callbacks( + self.error_all_requests( ConnectionShutdown("Connection to %s was closed" % self.host)) def handle_write(self, watcher, revents, errno=None): diff --git a/cassandra/io/twistedreactor.py b/cassandra/io/twistedreactor.py index b02fb6ab32..967a968f24 100644 --- a/cassandra/io/twistedreactor.py +++ b/cassandra/io/twistedreactor.py @@ -96,7 +96,7 @@ def clientConnectionLost(self, connector, reason): It should be safe to call defunct() here instead of just close, because we can assume that if the connection was closed cleanly, there are no - callbacks to error out. If this assumption turns out to be false, we + requests to error out. If this assumption turns out to be false, we can call close() instead of defunct() when "reason" is an appropriate type. """ @@ -213,7 +213,7 @@ def client_connection_made(self): def close(self): """ - Disconnect and error-out all callbacks. + Disconnect and error-out all requests. """ with self.lock: if self.is_closed: @@ -225,7 +225,7 @@ def close(self): log.debug("Closed socket to %s", self.host) if not self.is_defunct: - self.error_all_callbacks( + self.error_all_requests( ConnectionShutdown("Connection to %s was closed" % self.host)) # don't leave in-progress operations hanging self.connected_event.set() diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index d2142b09ca..928436e060 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -140,13 +140,13 @@ def test_close(self, mock_connectTCP): """ Verify that close() disconnects the connector and errors callbacks. 
""" - self.obj_ut.error_all_callbacks = Mock() + self.obj_ut.error_all_requests = Mock() self.obj_ut.add_connection() self.obj_ut.is_closed = False self.obj_ut.close() self.obj_ut.connector.disconnect.assert_called_with() self.assertTrue(self.obj_ut.connected_event.is_set()) - self.assertTrue(self.obj_ut.error_all_callbacks.called) + self.assertTrue(self.obj_ut.error_all_requests.called) def test_handle_read__incomplete(self): """ diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 268e19d535..521fab6105 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -27,7 +27,7 @@ locally_supported_compressions, ConnectionHeartbeat, _Frame) from cassandra.marshal import uint8_pack, uint32_pack, int32_pack from cassandra.protocol import (write_stringmultimap, write_int, write_string, - SupportedMessage) + SupportedMessage, ProtocolHandler) class ConnectionTest(unittest.TestCase): @@ -75,7 +75,7 @@ def make_msg(self, header, body=""): def test_bad_protocol_version(self, *args): c = self.make_connection() - c._callbacks = Mock() + c._requests = Mock() c.defunct = Mock() # read in a SupportedMessage response @@ -93,7 +93,7 @@ def test_bad_protocol_version(self, *args): def test_negative_body_length(self, *args): c = self.make_connection() - c._callbacks = Mock() + c._requests = Mock() c.defunct = Mock() # read in a SupportedMessage response @@ -110,7 +110,7 @@ def test_negative_body_length(self, *args): def test_unsupported_cql_version(self, *args): c = self.make_connection() - c._callbacks = {0: c._handle_options_response} + c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message)} c.defunct = Mock() c.cql_version = "3.0.3" @@ -133,7 +133,7 @@ def test_unsupported_cql_version(self, *args): def test_prefer_lz4_compression(self, *args): c = self.make_connection() - c._callbacks = {0: c._handle_options_response} + c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message)} c.defunct 
= Mock() c.cql_version = "3.0.3" @@ -156,7 +156,7 @@ def test_prefer_lz4_compression(self, *args): def test_requested_compression_not_available(self, *args): c = self.make_connection() - c._callbacks = {0: c._handle_options_response} + c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message)} c.defunct = Mock() # request lz4 compression c.compression = "lz4" @@ -186,7 +186,7 @@ def test_requested_compression_not_available(self, *args): def test_use_requested_compression(self, *args): c = self.make_connection() - c._callbacks = {0: c._handle_options_response} + c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message)} c.defunct = Mock() # request snappy compression c.compression = "snappy" @@ -213,7 +213,7 @@ def test_use_requested_compression(self, *args): def test_disable_compression(self, *args): c = self.make_connection() - c._callbacks = {0: c._handle_options_response} + c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message)} c.defunct = Mock() # disable compression c.compression = False diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index eea43f7586..d266ab774e 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -27,7 +27,7 @@ OverloadedErrorMessage, IsBootstrappingErrorMessage, PreparedQueryNotFound, PrepareMessage, RESULT_KIND_ROWS, RESULT_KIND_SET_KEYSPACE, - RESULT_KIND_SCHEMA_CHANGE) + RESULT_KIND_SCHEMA_CHANGE, ProtocolHandler) from cassandra.policies import RetryPolicy from cassandra.pool import NoConnectionsAvailable from cassandra.query import SimpleStatement @@ -67,7 +67,7 @@ def test_result_message(self): rf.session._pools.get.assert_called_once_with('ip1') pool.borrow_connection.assert_called_once_with(timeout=ANY) - connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY) + connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY, encoder=ProtocolHandler.encode_message, 
decoder=ProtocolHandler.decode_message) rf._set_result(self.make_mock_response([{'col': 'val'}])) result = rf.result() @@ -189,7 +189,7 @@ def test_retry_policy_says_retry(self): rf.session._pools.get.assert_called_once_with('ip1') pool.borrow_connection.assert_called_once_with(timeout=ANY) - connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY) + connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message) result = Mock(spec=UnavailableErrorMessage, info={}) rf._set_result(result) @@ -207,7 +207,7 @@ def test_retry_policy_says_retry(self): # an UnavailableException rf.session._pools.get.assert_called_with('ip1') pool.borrow_connection.assert_called_with(timeout=ANY) - connection.send_msg.assert_called_with(rf.message, 2, cb=ANY) + connection.send_msg.assert_called_with(rf.message, 2, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message) def test_retry_with_different_host(self): session = self.make_session() @@ -222,7 +222,7 @@ def test_retry_with_different_host(self): rf.session._pools.get.assert_called_once_with('ip1') pool.borrow_connection.assert_called_once_with(timeout=ANY) - connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY) + connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message) self.assertEqual(ConsistencyLevel.QUORUM, rf.message.consistency_level) result = Mock(spec=OverloadedErrorMessage, info={}) @@ -240,7 +240,7 @@ def test_retry_with_different_host(self): # it should try with a different host rf.session._pools.get.assert_called_with('ip2') pool.borrow_connection.assert_called_with(timeout=ANY) - connection.send_msg.assert_called_with(rf.message, 2, cb=ANY) + connection.send_msg.assert_called_with(rf.message, 2, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message) # the 
consistency level should be the same self.assertEqual(ConsistencyLevel.QUORUM, rf.message.consistency_level) From 49357dec114fad411f6b712dda4fa5ddc3e72736 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Thu, 16 Jul 2015 16:08:47 -0500 Subject: [PATCH 1578/3726] Simplify logic in Downgrading write timeout, and test --- cassandra/policies.py | 13 +++++++++---- tests/unit/test_policies.py | 15 ++++++--------- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 6d6c23b972..328b6b82fb 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -816,14 +816,19 @@ def on_write_timeout(self, query, consistency, write_type, required_responses, received_responses, retry_num): if retry_num != 0: return (self.RETHROW, None) - elif write_type in (WriteType.SIMPLE, WriteType.BATCH, WriteType.COUNTER) and received_responses > 0: - return (self.IGNORE, None) + + if write_type in (WriteType.SIMPLE, WriteType.BATCH, WriteType.COUNTER): + if received_responses > 0: + # persisted on at least one replica + return (self.IGNORE, None) + else: + return (self.RETHROW, None) elif write_type == WriteType.UNLOGGED_BATCH: return self._pick_consistency(received_responses) elif write_type == WriteType.BATCH_LOG: return (self.RETRY, consistency) - else: - return (self.RETHROW, None) + + return (self.RETHROW, None) def on_unavailable(self, query, consistency, required_replicas, alive_replicas, retry_num): if retry_num != 0: diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index bd8541047f..97e24455aa 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -1052,20 +1052,17 @@ def test_write_timeout(self): self.assertEqual(retry, RetryPolicy.RETHROW) self.assertEqual(consistency, None) - # On these type of writes failures should not be ignored - # if received_responses is 0 - for write_type in (WriteType.SIMPLE, WriteType.BATCH, WriteType.COUNTER): - retry, consistency = 
policy.on_write_timeout( - query=None, consistency=ONE, write_type=write_type, - required_responses=1, received_responses=0, retry_num=0) - self.assertEqual(retry, RetryPolicy.RETHROW) - - # ignore failures on these types of writes for write_type in (WriteType.SIMPLE, WriteType.BATCH, WriteType.COUNTER): + # ignore failures if at least one response (replica persisted) retry, consistency = policy.on_write_timeout( query=None, consistency=ONE, write_type=write_type, required_responses=1, received_responses=2, retry_num=0) self.assertEqual(retry, RetryPolicy.IGNORE) + # retrhow if we can't be sure we have a replica + retry, consistency = policy.on_write_timeout( + query=None, consistency=ONE, write_type=write_type, + required_responses=1, received_responses=0, retry_num=0) + self.assertEqual(retry, RetryPolicy.RETHROW) # downgrade consistency level on unlogged batch writes retry, consistency = policy.on_write_timeout( From 53c17166642c018446aa42d6eab2a9329fa9f184 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 17 Jul 2015 13:46:16 -0500 Subject: [PATCH 1579/3726] cqlengine example tweaks moved import, added a LWT example, set schema management var to avoid warning --- example_mapper.py | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/example_mapper.py b/example_mapper.py index eda77c2e73..31a39eec6d 100755 --- a/example_mapper.py +++ b/example_mapper.py @@ -14,10 +14,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+# silence warnings just for demo -- applications would typically not do this +import os +os.environ['CQLENG_ALLOW_SCHEMA_MANAGEMENT'] = '1' + import logging log = logging.getLogger() -log.setLevel('DEBUG') +log.setLevel('INFO') handler = logging.StreamHandler() handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s")) log.addHandler(handler) @@ -27,9 +31,9 @@ from cassandra.cqlengine import columns from cassandra.cqlengine import connection from cassandra.cqlengine import management -from cassandra.cqlengine.exceptions import ValidationError +from cassandra.cqlengine import ValidationError from cassandra.cqlengine.models import Model -from cassandra.cqlengine.query import BatchQuery +from cassandra.cqlengine.query import BatchQuery, LWTException KEYSPACE = "testkeyspace" @@ -61,12 +65,19 @@ def main(): log.info("### syncing model...") management.sync_table(FamilyMembers) - log.info("### add entities serially") - simmons = FamilyMembers.create(surname='Simmons', name='Gene', birth_year=1949, sex='m') # default uuid is assigned + # default uuid is assigned + simmons = FamilyMembers.create(surname='Simmons', name='Gene', birth_year=1949, sex='m') - # add members later + # add members to his family later FamilyMembers.create(id=simmons.id, surname='Simmons', name='Nick', birth_year=1989, sex='m') - FamilyMembers.create(id=simmons.id, surname='Simmons', name='Sophie', sex='f') + sophie = FamilyMembers.create(id=simmons.id, surname='Simmons', name='Sophie', sex='f') + + nick = FamilyMembers.objects(id=simmons.id, surname='Simmons', name='Nick') + try: + nick.iff(birth_year=1988).update(birth_year=1989) + except LWTException: + print "precondition not met" + # showing validation try: FamilyMembers.create(id=simmons.id, surname='Tweed', name='Shannon', birth_year=1957, sex='f') @@ -95,6 +106,9 @@ def main(): for m in FamilyMembers.objects(id=simmons.id, surname=simmons.surname): print m, m.birth_year, m.sex + log.info("### Constrain on 
clustering key") + kids = FamilyMembers.objects(id=simmons.id, surname=simmons.surname, name__in=['Nick', 'Sophie']) + log.info("### Delete a record") FamilyMembers(id=hogan_id, surname='Hogan', name='Linda').delete() for m in FamilyMembers.objects(id=hogan_id): From 1a480f196ade42798596f5257d2cbeffcadf154f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 17 Jul 2015 16:14:30 -0500 Subject: [PATCH 1580/3726] Test updates removing refs to schema tables. --- tests/integration/standard/test_cluster.py | 6 +++--- tests/integration/standard/test_types.py | 5 ++--- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index b8439c4de3..f218fd5d26 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -314,13 +314,13 @@ def test_refresh_schema_table(self): original_meta = cluster.metadata.keyspaces original_system_meta = original_meta['system'] - original_system_schema_meta = original_system_meta.tables['schema_columnfamilies'] + original_system_schema_meta = original_system_meta.tables['local'] # only refresh one table - cluster.refresh_table_metadata('system', 'schema_columnfamilies') + cluster.refresh_table_metadata('system', 'local') current_meta = cluster.metadata.keyspaces current_system_meta = current_meta['system'] - current_system_schema_meta = current_system_meta.tables['schema_columnfamilies'] + current_system_schema_meta = current_system_meta.tables['local'] self.assertIs(original_meta, current_meta) self.assertIs(original_system_meta, current_system_meta) self.assertIsNot(original_system_schema_meta, current_system_schema_meta) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index f5c148ddd4..fa94b88ba4 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -677,9 +677,8 @@ def 
test_can_insert_unicode_query_string(self): Test to ensure unicode strings can be used in a query """ s = self.session - - query = u"SELECT * FROM system.schema_columnfamilies WHERE keyspace_name = 'ef\u2052ef' AND columnfamily_name = %s" - s.execute(query, (u"fe\u2051fe",)) + s.execute(u"SELECT * FROM system.local WHERE key = 'ef\u2052ef'") + s.execute(u"SELECT * FROM system.local WHERE key = %s", (u"fe\u2051fe",)) def test_can_read_composite_type(self): """ From b95bb9fefd5b53d85eaf20c7f92dfd68108e6f9c Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 20 Jul 2015 12:45:46 -0500 Subject: [PATCH 1581/3726] 2.6 changelog update --- CHANGELOG.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a4111bd507..72949a1e02 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,11 @@ +2.6.0 +===== +July 20, 2015 + +Bug Fixes +--------- +* Output proper CQL for compact tables with no clustering columns (PYTHON-360) + 2.6.0c2 ======= June 24, 2015 From 536d7c36ee69ebb7273edfbe78babd40849e620a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 20 Jul 2015 12:48:56 -0500 Subject: [PATCH 1582/3726] 2.6.0 release version --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 23d4623d24..0a0f29320f 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (2, 6, '0c2', 'post') +__version_info__ = (2, 6, 0) __version__ = '.'.join(map(str, __version_info__)) From 64c95a7ab1453dd56759b08d42cd1ae8e0292f4a Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 20 Jul 2015 13:54:23 -0500 Subject: [PATCH 1583/3726] doc: remove rtype from util functions was being used as x-ref and linking into cqlengine attributes --- cassandra/util.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/cassandra/util.py 
b/cassandra/util.py index ba49b5687e..83260577df 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -15,8 +15,6 @@ def datetime_from_timestamp(timestamp): and rounding differences in Python 3.4 (PYTHON-340). :param timestamp: a unix timestamp, in seconds - - :rtype: datetime """ dt = DATETIME_EPOC + datetime.timedelta(seconds=timestamp) return dt @@ -29,9 +27,6 @@ def unix_time_from_uuid1(uuid_arg): results of queries returning a v1 :class:`~uuid.UUID`. :param uuid_arg: a version 1 :class:`~uuid.UUID` - - :rtype: timestamp - """ return (uuid_arg.time - 0x01B21DD213814000) / 1e7 @@ -42,9 +37,6 @@ def datetime_from_uuid1(uuid_arg): specified type-1 UUID. :param uuid_arg: a version 1 :class:`~uuid.UUID` - - :rtype: timestamp - """ return datetime_from_timestamp(unix_time_from_uuid1(uuid_arg)) From 0dfcbcabf6dbb720eb27fcdf8b84936571a8c0fc Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 20 Jul 2015 14:15:39 -0500 Subject: [PATCH 1584/3726] post release version update --- cassandra/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 0a0f29320f..4bec0e8aad 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (2, 6, 0) +__version_info__ = (2, 6, 0, 'post') __version__ = '.'.join(map(str, __version_info__)) From f7ef27e006c72049fd99a25db61f8a31f1997907 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 2 Jul 2015 18:02:29 -0500 Subject: [PATCH 1585/3726] Adding timer tests for async, libev, and twisted. Added integration tests. 
All for PYTHON-108 --- tests/__init__.py | 18 ++-- tests/integration/long/test_failure_types.py | 90 +++++++++++++++- tests/unit/io/eventlet_utils.py | 37 +++++++ tests/unit/io/gevent_utils.py | 56 ++++++++++ tests/unit/io/test_asyncorereactor.py | 40 ++++++- tests/unit/io/test_eventletreactor.py | 67 ++++++++++++ tests/unit/io/test_geventreactor.py | 78 ++++++++++++++ tests/unit/io/test_libevreactor.py | 9 +- tests/unit/io/test_libevtimer.py | 82 ++++++++++++++ tests/unit/io/test_twistedreactor.py | 53 ++++++++- tests/unit/io/utils.py | 107 +++++++++++++++++++ tox.ini | 13 ++- 12 files changed, 632 insertions(+), 18 deletions(-) create mode 100644 tests/unit/io/eventlet_utils.py create mode 100644 tests/unit/io/gevent_utils.py create mode 100644 tests/unit/io/test_eventletreactor.py create mode 100644 tests/unit/io/test_geventreactor.py create mode 100644 tests/unit/io/test_libevtimer.py create mode 100644 tests/unit/io/utils.py diff --git a/tests/__init__.py b/tests/__init__.py index e1c1e54e11..150e600c91 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -14,6 +14,7 @@ import logging import sys +import socket log = logging.getLogger() log.setLevel('DEBUG') @@ -24,15 +25,18 @@ log.addHandler(handler) -def is_gevent_monkey_patched(): - return 'gevent.monkey' in sys.modules +def is_eventlet_monkey_patched(): + if 'eventlet.patcher' not in sys.modules: + return False + import eventlet.patcher + return eventlet.patcher.is_monkey_patched('socket') -def is_eventlet_monkey_patched(): - if 'eventlet.patcher' in sys.modules: - import eventlet - return eventlet.patcher.is_monkey_patched('socket') - return False +def is_gevent_monkey_patched(): + if 'gevent.monkey' not in sys.modules: + return False + import gevent.socket + return socket.socket is gevent.socket.socket def is_monkey_patched(): diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index 30e05b60b6..e611adca35 100644 --- 
a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -12,14 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys, logging, traceback +import sys,logging, traceback, time from cassandra import ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, ReadFailure, WriteFailure,\ FunctionFailure from cassandra.cluster import Cluster from cassandra.concurrent import execute_concurrent_with_args from cassandra.query import SimpleStatement -from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace, remove_cluster +from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace, remove_cluster, get_node +from mock import Mock try: import unittest2 as unittest @@ -301,3 +302,88 @@ def test_user_function_failure(self): """ DROP TABLE test3rf.d; """, consistency_level=ConsistencyLevel.ALL, expected_exception=None) + + +class TimeoutTimerTest(unittest.TestCase): + def setUp(self): + """ + Setup sessions and pause node1 + """ + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + + # self.node1, self.node2, self.node3 = get_cluster().nodes.values() + self.node1 = get_node(1) + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + + ddl = ''' + CREATE TABLE test3rf.timeout ( + k int PRIMARY KEY, + v int )''' + self.session.execute(ddl) + self.node1.pause() + + def tearDown(self): + """ + Shutdown cluster and resume node1 + """ + self.node1.resume() + self.session.execute("DROP TABLE test3rf.timeout") + self.cluster.shutdown() + + def test_async_timeouts(self): + """ + Test to validate that timeouts are honored + + + Exercise the underlying timeouts, by attempting a query that will timeout. Ensure the default timeout is still + honored. Make sure that user timeouts are also honored. 
+ + @since 2.7.0 + @jira_ticket PYTHON-108 + @expected_result timeouts should be honored + + @test_category + + """ + + # Because node1 is stopped these statements will all timeout + ss = SimpleStatement('SELECT * FROM test3rf.test', consistency_level=ConsistencyLevel.ALL) + + # Test with default timeout (should be 10) + start_time = time.time() + future = self.session.execute_async(ss) + with self.assertRaises(OperationTimedOut): + future.result() + end_time = time.time() + total_time = end_time-start_time + expected_time = self.session.default_timeout + # check timeout and ensure it's within a reasonable range + self.assertAlmostEqual(expected_time, total_time, delta=.05) + + # Test with user defined timeout (Should be 1) + start_time = time.time() + future = self.session.execute_async(ss, timeout=1) + mock_callback = Mock(return_value=None) + mock_errorback = Mock(return_value=None) + future.add_callback(mock_callback) + future.add_errback(mock_errorback) + + with self.assertRaises(OperationTimedOut): + future.result() + end_time = time.time() + total_time = end_time-start_time + expected_time = 1 + # check timeout and ensure it's within a reasonable range + self.assertAlmostEqual(expected_time, total_time, delta=.05) + self.assertTrue(mock_errorback.called) + self.assertFalse(mock_callback.called) + + + + + + + + diff --git a/tests/unit/io/eventlet_utils.py b/tests/unit/io/eventlet_utils.py new file mode 100644 index 0000000000..8aee030f27 --- /dev/null +++ b/tests/unit/io/eventlet_utils.py @@ -0,0 +1,37 @@ +# Copyright 2013-2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os +import select +import socket +import thread +import Queue +import threading +import __builtin__ +import ssl +import time + + +def eventlet_un_patch_all(): + """ + A method to unpatch eventlet monkey patching used for the reactor tests + """ + + # These are the modules that are loaded by eventlet we reload them all + modules_to_unpatch = [os, select, socket, thread, time, Queue, threading, ssl, __builtin__] + for to_unpatch in modules_to_unpatch: + reload(to_unpatch) + + diff --git a/tests/unit/io/gevent_utils.py b/tests/unit/io/gevent_utils.py new file mode 100644 index 0000000000..1c6e27adde --- /dev/null +++ b/tests/unit/io/gevent_utils.py @@ -0,0 +1,56 @@ +# Copyright 2013-2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from gevent import monkey + + +def gevent_un_patch_all(): + """ + A method to unpatch gevent libraries. These are unloaded + in the same order that gevent monkey patch loads theirs. + Order cannot be arbitrary. 
This is used in the unit tests to + un monkey patch gevent + """ + restore_saved_module("os") + restore_saved_module("time") + restore_saved_module("thread") + restore_saved_module("threading") + restore_saved_module("_threading_local") + restore_saved_module("stdin") + restore_saved_module("stdout") + restore_saved_module("socket") + restore_saved_module("select") + restore_saved_module("ssl") + restore_saved_module("subprocess") + + +def restore_saved_module(module): + """ + gevent monkey patch keeps a list of all patched modules. + This will restore the original ones + :param module: to unpatch + :return: + """ + + # Check the saved attributes in geven monkey patch + if not (module in monkey.saved): + return + _module = __import__(module) + + # If it exist unpatch it + for attr in monkey.saved[module]: + if hasattr(_module, attr): + setattr(_module, attr, monkey.saved[module][attr]) + diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 9d97f4688a..16fbf2fb72 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -21,20 +21,20 @@ import errno import math +import time from mock import patch, Mock import os from six import BytesIO import socket from socket import error as socket_error - from cassandra.connection import (HEADER_DIRECTION_TO_CLIENT, - ConnectionException, ProtocolError) + ConnectionException, ProtocolError,Timer) from cassandra.io.asyncorereactor import AsyncoreConnection from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage, ReadyMessage, ServerError) from cassandra.marshal import uint8_pack, uint32_pack, int32_pack - from tests import is_monkey_patched +from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback class AsyncoreConnectionTest(unittest.TestCase): @@ -300,3 +300,37 @@ def test_partial_message_read(self, *args): self.assertTrue(c.connected_event.is_set()) self.assertFalse(c.is_defunct) + + 
def test_multi_timer_validation(self, *args): + """ + Verify that timer timeouts are honored appropriately + """ + c = self.make_connection() + # Tests timers submitted in order at various timeouts + submit_and_wait_for_completion(self, AsyncoreConnection, 0, 100, 1, 100) + # Tests timers submitted in reverse order at various timeouts + submit_and_wait_for_completion(self, AsyncoreConnection, 100, 0, -1, 100) + # Tests timers submitted in varying order at various timeouts + submit_and_wait_for_completion(self, AsyncoreConnection, 0, 100, 1, 100, True) + + def test_timer_cancellation(self): + """ + Verify that timer cancellation is honored + """ + + # Various lists for tracking callback stage + connection = self.make_connection() + timeout = .1 + callback = TimerCallback(timeout) + timer = connection.create_timer(timeout, callback.invoke) + timer.cancel() + # Release context allow for timer thread to run. + time.sleep(.2) + timer_manager = connection._loop._timers + # Assert that the cancellation was honored + self.assertFalse(timer_manager._queue) + self.assertFalse(timer_manager._new_timers) + self.assertFalse(callback.was_invoked()) + + + diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py new file mode 100644 index 0000000000..9f071d3bec --- /dev/null +++ b/tests/unit/io/test_eventletreactor.py @@ -0,0 +1,67 @@ +# Copyright 2013-2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback +from tests import is_eventlet_monkey_patched +import time + +try: + from cassandra.io.eventletreactor import EventletConnection +except ImportError: + EventletConnection = None # noqa + + +class EventletTimerTest(unittest.TestCase): + + def setUp(self): + if EventletConnection is None: + raise unittest.SkipTest("Eventlet libraries not available") + if not is_eventlet_monkey_patched(): + raise unittest.SkipTest("Can't test eventlet without monkey patching") + EventletConnection.initialize_reactor() + + def test_multi_timer_validation(self, *args): + """ + Verify that timer timeouts are honored appropriately + """ + # Tests timers submitted in order at various timeouts + submit_and_wait_for_completion(self, EventletConnection, 0, 100, 1, 100) + # Tests timers submitted in reverse order at various timeouts + submit_and_wait_for_completion(self, EventletConnection, 100, 0, -1, 100) + # Tests timers submitted in varying order at various timeouts + submit_and_wait_for_completion(self, EventletConnection, 0, 100, 1, 100, True) + + def test_timer_cancellation(self): + """ + Verify that timer cancellation is honored + """ + + # Various lists for tracking callback stage + timeout = .1 + callback = TimerCallback(timeout) + timer = EventletConnection.create_timer(timeout, callback.invoke) + timer.cancel() + # Release context allow for timer thread to run. 
+ time.sleep(.2) + timer_manager = EventletConnection._timers + # Assert that the cancellation was honored + self.assertFalse(timer_manager._queue) + self.assertFalse(timer_manager._new_timers) + self.assertFalse(callback.was_invoked()) \ No newline at end of file diff --git a/tests/unit/io/test_geventreactor.py b/tests/unit/io/test_geventreactor.py new file mode 100644 index 0000000000..c90be37d24 --- /dev/null +++ b/tests/unit/io/test_geventreactor.py @@ -0,0 +1,78 @@ +# Copyright 2013-2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import time +from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback +from tests import is_gevent_monkey_patched + +try: + from cassandra.io.geventreactor import GeventConnection + import gevent.monkey + from gevent_utils import gevent_un_patch_all +except ImportError: + GeventConnection = None # noqa + + +class GeventTimerTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + if GeventConnection is not None: + if not is_gevent_monkey_patched(): + gevent.monkey.patch_all() + + @classmethod + def tearDownClass(cls): + if is_gevent_monkey_patched(): + gevent_un_patch_all() + + def setUp(self): + if not is_gevent_monkey_patched(): + raise unittest.SkipTest("Can't test gevent without monkey patching") + GeventConnection.initialize_reactor() + + def test_multi_timer_validation(self, *args): + """ + Verify that timer timeouts are honored appropriately + """ + + # Tests timers submitted in order at various timeouts + submit_and_wait_for_completion(self, GeventConnection, 0, 100, 1, 100) + # Tests timers submitted in reverse order at various timeouts + submit_and_wait_for_completion(self, GeventConnection, 100, 0, -1, 100) + # Tests timers submitted in varying order at various timeouts + submit_and_wait_for_completion(self, GeventConnection, 0, 100, 1, 100, True), + + def test_timer_cancellation(self): + """ + Verify that timer cancellation is honored + """ + + # Various lists for tracking callback stage + timeout = .1 + callback = TimerCallback(timeout) + timer = GeventConnection.create_timer(timeout, callback.invoke) + timer.cancel() + # Release context allow for timer thread to run. 
+ time.sleep(.2) + timer_manager = GeventConnection._timers + # Assert that the cancellation was honored + self.assertFalse(timer_manager._queue) + self.assertFalse(timer_manager._new_timers) + self.assertFalse(callback.was_invoked()) diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index 334480ef3b..2b26343a63 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -24,6 +24,7 @@ from six import BytesIO from socket import error as socket_error import sys +import time from cassandra.connection import (HEADER_DIRECTION_TO_CLIENT, ConnectionException, ProtocolError) @@ -31,6 +32,10 @@ from cassandra.protocol import (write_stringmultimap, write_int, write_string, SupportedMessage, ReadyMessage, ServerError) from cassandra.marshal import uint8_pack, uint32_pack, int32_pack +from tests.unit.io.utils import TimerCallback +from tests.unit.io.utils import submit_and_wait_for_completion +from tests import is_monkey_patched + try: from cassandra.io.libevreactor import LibevConnection @@ -46,7 +51,7 @@ class LibevConnectionTest(unittest.TestCase): def setUp(self): - if 'gevent.monkey' in sys.modules: + if is_monkey_patched(): raise unittest.SkipTest("Can't test libev with monkey patching") if LibevConnection is None: raise unittest.SkipTest('libev does not appear to be installed correctly') @@ -290,4 +295,4 @@ def test_partial_message_read(self, *args): c.handle_read(None, 0) self.assertTrue(c.connected_event.is_set()) - self.assertFalse(c.is_defunct) + self.assertFalse(c.is_defunct) \ No newline at end of file diff --git a/tests/unit/io/test_libevtimer.py b/tests/unit/io/test_libevtimer.py new file mode 100644 index 0000000000..988282c2b1 --- /dev/null +++ b/tests/unit/io/test_libevtimer.py @@ -0,0 +1,82 @@ +# Copyright 2013-2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + + +from mock import patch, Mock + +import time + +from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback +from tests import is_monkey_patched + + +try: + from cassandra.io.libevreactor import LibevConnection +except ImportError: + LibevConnection = None # noqa + + +@patch('socket.socket') +@patch('cassandra.io.libevwrapper.IO') +class LibevTimerTest(unittest.TestCase): + + def setUp(self): + if is_monkey_patched(): + raise unittest.SkipTest("Can't test libev with monkey patching") + if LibevConnection is None: + raise unittest.SkipTest('libev does not appear to be installed correctly') + LibevConnection.initialize_reactor() + + def make_connection(self): + c = LibevConnection('1.2.3.4', cql_version='3.0.1') + c._socket = Mock() + c._socket.send.side_effect = lambda x: len(x) + return c + + def test_multi_timer_validation(self, *args): + """ + Verify that timer timeouts are honored appropriately + """ + c = self.make_connection() + c.initialize_reactor() + # Tests timers submitted in order at various timeouts + submit_and_wait_for_completion(self, c, 0, 100, 1, 100) + # Tests timers submitted in reverse order at various timeouts + submit_and_wait_for_completion(self, c, 100, 0, -1, 100) + # Tests timers submitted in varying order at various timeouts + submit_and_wait_for_completion(self, c, 0, 100, 1, 100, True) + + def test_timer_cancellation(self, *args): + """ + Verify that timer cancellation is honored + """ + + # Various lists for tracking 
callback stage + connection = self.make_connection() + timeout = .1 + callback = TimerCallback(timeout) + timer = connection.create_timer(timeout, callback.invoke) + timer.cancel() + # Release context allow for timer thread to run. + time.sleep(.2) + timer_manager = connection._libevloop._timers + # Assert that the cancellation was honored + self.assertFalse(timer_manager._queue) + self.assertFalse(timer_manager._new_timers) + self.assertFalse(callback.was_invoked()) + diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index d2142b09ca..7dcfa5609d 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -17,6 +17,7 @@ except ImportError: import unittest from mock import Mock, patch +import time try: from twisted.test import proto_helpers @@ -26,6 +27,54 @@ twistedreactor = None # NOQA from cassandra.connection import _Frame +from tests.unit.io.utils import submit_and_wait_for_completion, TimerCallback + + +class TestTwistedTimer(unittest.TestCase): + """ + Simple test class that is used to validate that the TimerManager, and timer + classes function appropriately with the twisted infrastructure + """ + + def setUp(self): + if twistedreactor is None: + raise unittest.SkipTest("Twisted libraries not available") + twistedreactor.TwistedConnection.initialize_reactor() + + def test_multi_timer_validation(self): + """ + Verify that the timers are called in the correct order + """ + twistedreactor.TwistedConnection.initialize_reactor() + connection = twistedreactor.TwistedConnection('1.2.3.4', + cql_version='3.0.1') + # Tests timers submitted in order at various timeouts + submit_and_wait_for_completion(self, connection, 0, 100, 1, 100) + # Tests timers submitted in reverse order at various timeouts + submit_and_wait_for_completion(self, connection, 100, 0, -1, 100) + # Tests timers submitted in varying order at various timeouts + submit_and_wait_for_completion(self, connection, 0, 100, 1, 100, 
True) + + def test_timer_cancellation(self, *args): + """ + Verify that timer cancellation is honored + """ + + # Various lists for tracking callback stage + connection = twistedreactor.TwistedConnection('1.2.3.4', + cql_version='3.0.1') + timeout = .1 + callback = TimerCallback(timeout) + timer = connection.create_timer(timeout, callback.invoke) + timer.cancel() + # Release context allow for timer thread to run. + time.sleep(.2) + timer_manager = connection._loop._timers + # Assert that the cancellation was honored + self.assertFalse(timer_manager._queue) + self.assertFalse(timer_manager._new_timers) + self.assertFalse(callback.was_invoked()) + class TestTwistedProtocol(unittest.TestCase): @@ -96,8 +145,6 @@ def setUp(self): twistedreactor.TwistedConnection.initialize_reactor() self.reactor_cft_patcher = patch( 'twisted.internet.reactor.callFromThread') - self.reactor_running_patcher = patch( - 'twisted.internet.reactor.running', False) self.reactor_run_patcher = patch('twisted.internet.reactor.run') self.mock_reactor_cft = self.reactor_cft_patcher.start() self.mock_reactor_run = self.reactor_run_patcher.start() @@ -107,7 +154,6 @@ def setUp(self): def tearDown(self): self.reactor_cft_patcher.stop() self.reactor_run_patcher.stop() - self.obj_ut._loop._cleanup() def test_connection_initialization(self): """ @@ -196,3 +242,4 @@ def test_push(self, mock_connectTCP): self.obj_ut.push('123 pickup') self.mock_reactor_cft.assert_called_with( self.obj_ut.connector.transport.write, '123 pickup') + diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py new file mode 100644 index 0000000000..26307ffaf9 --- /dev/null +++ b/tests/unit/io/utils.py @@ -0,0 +1,107 @@ +# Copyright 2013-2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time + + +class TimerCallback(): + + invoked = False + created_time = 0 + invoked_time = 0 + expected_wait = 0 + + def __init__(self, expected_wait): + self.invoked = False + self.created_time = time.time() + self.expected_wait = expected_wait + + def invoke(self): + self.invoked_time = time.time() + self.invoked = True + + def was_invoked(self): + return self.invoked + + def get_wait_time(self): + elapsed_time = self.invoked_time - self.created_time + return elapsed_time + + def wait_match_excepted(self): + if self.expected_wait-.01 <= self.get_wait_time() <= self.expected_wait+.01: + return True + return False + + +def get_timeout(gross_time, start, end, precision, split_range): + """ + A way to generate varying timeouts based on ranges + :param gross_time: Some integer between start and end + :param start: the start value of the range + :param end: the end value of the range + :param precision: the precision to use to generate the timeout. + :param split_range: generate values from both ends + :return: a timeout value to use + """ + if(split_range): + top_num = float(end)/precision + bottom_num = float(start)/precision + if gross_time % 2 == 0: + timeout = top_num - float(gross_time)/precision + else: + timeout = bottom_num + float(gross_time)/precision + + else: + timeout = float(gross_time)/precision + + return timeout + + +def submit_and_wait_for_completion(unit_test, connection, start, end, increment, precision, split_range=False): + """ + This will submit a number of timers to the provided connection. 
It will then ensure that the corresponding + callback is invoked in the appropriate amount of time. + :param unit_test: Invoking unit tests + :param connection: Connection to create the timer on. + :param start: Lower bound of range. + :param end: Upper bound of the time range + :param increment: +1, or -1 + :param precision: 100 for centisecond, 1000 for milliseconds + :param split_range: True to split the range between incrementing and decrementing. + """ + + # Various lists for tracking callback as completed or pending + pending_callbacks = [] + completed_callbacks = [] + + # submit timers with various timeouts + for gross_time in range(start, end, increment): + timeout = get_timeout(gross_time, start, end, precision, split_range) + callback = TimerCallback(timeout) + connection.create_timer(timeout, callback.invoke) + pending_callbacks.append(callback) + + # wait for all the callbacks associated with the timers to be invoked + while len(pending_callbacks) is not 0: + for callback in pending_callbacks: + if callback.was_invoked(): + pending_callbacks.remove(callback) + completed_callbacks.append(callback) + time.sleep(.1) + + # ensure they are all called back in a timely fashion + for callback in completed_callbacks: + unit_test.assertAlmostEqual(callback.expected_wait, callback.get_wait_time(), delta=.1) + + diff --git a/tox.ini b/tox.ini index b6b0b519dc..0b4a0b9f35 100644 --- a/tox.ini +++ b/tox.ini @@ -7,10 +7,12 @@ deps = nose PyYAML six + [testenv] deps = {[base]deps} - sure==1.2.3 + sure blist + setenv = USE_CASS_EXTERNAL=1 commands = {envpython} setup.py build_ext --inplace nosetests --verbosity=2 tests/unit/ @@ -19,8 +21,17 @@ commands = {envpython} setup.py build_ext --inplace [testenv:py26] deps = {[testenv]deps} unittest2 + twisted + eventlet + gevent # test skipping is different in unittest2 for python 2.7+; let's just use it where needed +[testenv:py27] +deps = {[testenv]deps} + twisted + eventlet + gevent + [testenv:pypy] deps = {[base]deps} 
commands = {envpython} setup.py build_ext --inplace From ea27ef120176c7ee13f6a61026e3a949193f7790 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 15 Jul 2015 16:49:14 -0500 Subject: [PATCH 1586/3726] Adding tests for python-313 --- .../standard/test_custom_protocol_handler.py | 218 ++++++++++++++++++ tox.ini | 2 +- 2 files changed, 219 insertions(+), 1 deletion(-) create mode 100644 tests/integration/standard/test_custom_protocol_handler.py diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py new file mode 100644 index 0000000000..63d4b1991a --- /dev/null +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -0,0 +1,218 @@ +# Copyright 2013-2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from cassandra.protocol import ProtocolHandler, ResultMessage, UUIDType, read_int, EventMessage +from cassandra.query import tuple_factory +from cassandra.cluster import Cluster +from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass +from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, get_sample +import uuid + + +def setup_module(): + use_singledc() + update_datatypes() + + +class CustomProtocolHandlerTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + cls.session = cls.cluster.connect() + cls.session.execute("CREATE KEYSPACE custserdes WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}") + cls.session.set_keyspace("custserdes") + + @classmethod + def tearDownClass(cls): + cls.session.execute("DROP KEYSPACE custserdes") + cls.cluster.shutdown() + + def test_custom_raw_uuid_row_results(self): + """ + Test to validate that custom protocol handlers work with raw row results + + Connect and validate that the normal protocol handler is used. + Re-Connect and validate that the custom protocol handler is used. + Re-Connect and validate that the normal protocol handler is used. + + @since 2.7 + @jira_ticket PYTHON-313 + @expected_result custom protocol handler is invoked appropriately. 
+ + @test_category data_types:serialization + """ + + # Ensure that we get normal uuid back first + session = Cluster().connect() + session.row_factory = tuple_factory + result_set = session.execute("SELECT schema_version FROM system.local") + result = result_set.pop() + uuid_type = result[0] + self.assertEqual(type(uuid_type), uuid.UUID) + + # use our custom protocol handlder + + session.client_protocol_handler = CustomTestRawRowType + session.row_factory = tuple_factory + result_set = session.execute("SELECT schema_version FROM system.local") + result = result_set.pop() + raw_value = result.pop() + self.assertEqual(type(raw_value), str) + self.assertEqual(len(raw_value), 16) + + # Ensure that we get normal uuid back when we re-connect + session.client_protocol_handler = ProtocolHandler + result_set = session.execute("SELECT schema_version FROM system.local") + result = result_set.pop() + uuid_type = result[0] + self.assertEqual(type(uuid_type), uuid.UUID) + session.shutdown() + + def test_custom_raw_row_results_all_types(self): + """ + Test to validate that custom protocol handlers work with varying types of + results + + Connect, create a table with all sorts of data. Query the data, make the sure the custom results handler is + used correctly. + + @since 2.7 + @jira_ticket PYTHON-313 + @expected_result custom protocol handler is invoked with various result types + + @test_category data_types:serialization + """ + # Connect using a custom protocol handler that tracks the various types the result message is used with. 
+ session = Cluster().connect(keyspace="custserdes") + session.client_protocol_handler = CustomProtocolHandlerResultMessageTracked + session.row_factory = tuple_factory + + columns_string = create_table_with_all_types("test_table", session) + + # verify data + params = get_all_primitive_params() + results = session.execute("SELECT {0} FROM alltypes WHERE zz=0".format(columns_string))[0] + for expected, actual in zip(params, results): + self.assertEqual(actual, expected) + # Ensure we have covered the various primitive types + self.assertEqual(len(CustomResultMessageTracked.checked_rev_row_set), len(PRIMITIVE_DATATYPES)-1) + session.shutdown() + + +def create_table_with_all_types(table_name, session): + """ + Method that given a table_name and session construct a table that contains all possible primitive types + :param table_name: Name of table to create + :param session: session to use for table creation + :return: a string containing and columns. This can be used to query the table. + """ + # create table + alpha_type_list = ["zz int PRIMARY KEY"] + col_names = ["zz"] + start_index = ord('a') + for i, datatype in enumerate(PRIMITIVE_DATATYPES): + alpha_type_list.append("{0} {1}".format(chr(start_index + i), datatype)) + col_names.append(chr(start_index + i)) + + session.execute("CREATE TABLE alltypes ({0})".format(', '.join(alpha_type_list)), timeout=120) + + # create the input + params = get_all_primitive_params() + + # insert into table as a simple statement + columns_string = ', '.join(col_names) + placeholders = ', '.join(["%s"] * len(col_names)) + session.execute("INSERT INTO alltypes ({0}) VALUES ({1})".format(columns_string, placeholders), params, timeout=120) + return columns_string + + +def get_all_primitive_params(): + """ + Simple utility method used to give back a list of all possible primitive data sample types. 
+ """ + params = [0] + for datatype in PRIMITIVE_DATATYPES: + params.append((get_sample(datatype))) + return params + + +class CustomResultMessageRaw(ResultMessage): + """ + This is a custom Result Message that is used to return raw results, rather then + results which contain objects. + """ + my_type_codes = ResultMessage.type_codes.copy() + my_type_codes[0xc] = UUIDType + type_codes = my_type_codes + + @classmethod + def recv_results_rows(cls, f, protocol_version, user_type_map): + paging_state, column_metadata = cls.recv_results_metadata(f, user_type_map) + rowcount = read_int(f) + rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)] + coltypes = [c[3] for c in column_metadata] + return (paging_state, (coltypes, rows)) + + +class CustomTestRawRowType(ProtocolHandler): + """ + This is the a custom protocol handler that will substitute the the + customResultMesageRowRaw Result message for our own implementation + """ + my_opcodes = ProtocolHandler.message_types_by_opcode.copy() + my_opcodes[CustomResultMessageRaw.opcode] = CustomResultMessageRaw + message_types_by_opcode = my_opcodes + + +class CustomResultMessageTracked(ResultMessage): + """ + This is a custom Result Message that is use to track what primitive types + have been processed when it receives results + """ + my_type_codes = ResultMessage.type_codes.copy() + my_type_codes[0xc] = UUIDType + type_codes = my_type_codes + checked_rev_row_set = set() + + @classmethod + def recv_results_rows(cls, f, protocol_version, user_type_map): + paging_state, column_metadata = cls.recv_results_metadata(f, user_type_map) + rowcount = read_int(f) + rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)] + colnames = [c[2] for c in column_metadata] + coltypes = [c[3] for c in column_metadata] + cls.checked_rev_row_set.update(coltypes) + parsed_rows = [ + tuple(ctype.from_binary(val, protocol_version) + for ctype, val in zip(coltypes, row)) + for row in rows] + return (paging_state, 
(colnames, parsed_rows)) + + +class CustomProtocolHandlerResultMessageTracked(ProtocolHandler): + """ + This is the a custom protocol handler that will substitute the the + CustomTestRawRowTypeTracked Result message for our own implementation + """ + my_opcodes = ProtocolHandler.message_types_by_opcode.copy() + my_opcodes[CustomResultMessageTracked.opcode] = CustomResultMessageTracked + message_types_by_opcode = my_opcodes + + diff --git a/tox.ini b/tox.ini index 23b2aee1f4..b6b0b519dc 100644 --- a/tox.ini +++ b/tox.ini @@ -3,7 +3,7 @@ envlist = py26,py27,pypy,py33,py34 [base] deps = nose - mock + mock<=1.0.1 PyYAML six From 3b7f879514cc472eee56683113e6e29b30c2a0b6 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Fri, 26 Jun 2015 17:25:08 -0500 Subject: [PATCH 1587/3726] Adding Extended SSL tests for client authentication --- tests/integration/long/ssl/driver_ca_cert.pem | 2 +- tests/integration/long/ssl/python_driver.crt | Bin 0 -> 947 bytes tests/integration/long/ssl/python_driver.jks | Bin 0 -> 2306 bytes tests/integration/long/ssl/python_driver.key | 34 ++++ tests/integration/long/ssl/python_driver.pem | 19 +++ .../long/ssl/python_driver_bad.pem | 19 +++ .../long/ssl/python_driver_no_pass.key | 27 ++++ tests/integration/long/ssl/server_cert.pem | 13 ++ .../ssl/{keystore.jks => server_keystore.jks} | Bin tests/integration/long/ssl/server_trust.jks | Bin 0 -> 1677 bytes tests/integration/long/test_ssl.py | 148 ++++++++++++++++-- 11 files changed, 252 insertions(+), 10 deletions(-) create mode 100644 tests/integration/long/ssl/python_driver.crt create mode 100644 tests/integration/long/ssl/python_driver.jks create mode 100644 tests/integration/long/ssl/python_driver.key create mode 100644 tests/integration/long/ssl/python_driver.pem create mode 100644 tests/integration/long/ssl/python_driver_bad.pem create mode 100644 tests/integration/long/ssl/python_driver_no_pass.key create mode 100644 tests/integration/long/ssl/server_cert.pem rename 
tests/integration/long/ssl/{keystore.jks => server_keystore.jks} (100%) create mode 100644 tests/integration/long/ssl/server_trust.jks diff --git a/tests/integration/long/ssl/driver_ca_cert.pem b/tests/integration/long/ssl/driver_ca_cert.pem index 79ec6af4bb..7e55555767 100644 --- a/tests/integration/long/ssl/driver_ca_cert.pem +++ b/tests/integration/long/ssl/driver_ca_cert.pem @@ -6,7 +6,7 @@ Fw0xNDEwMDMxNTQ2NDdaFw0xNTAxMDExNTQ2NDdaMGoxCzAJBgNVBAYTAlRFMQsw CQYDVQQIEwJDQTEUMBIGA1UEBxMLU2FudGEgQ2xhcmExETAPBgNVBAoTCERhdGFT dGF4MQswCQYDVQQLEwJURTEYMBYGA1UEAxMPUGhpbGlwIFRob21wc29uMIGfMA0G CSqGSIb3DQEBAQUAA4GNADCBiQKBgQChGDwrhpQR0d+NoqilMgsBlR6A2Dd1oMyI -Ue42sU4tN63g5N44Ic4RpTiyaWgnAkP332ok3YAuVbxytwEv2K9HrUSiokAiuinl +Ue42sU4tN63g5N4adasfasdfsWgnAkP332ok3YAuVbxytwEv2K9HrUSiokAiuinl hhHA8CXTHt/1ItzzWj9uJ3Hneb+5lOkXVTZX7Y+q3aSdpx/HnZqn4i27DtLZF0z3 LccWPWRinQIDAQABoyEwHzAdBgNVHQ4EFgQU9WJpUhgGTBBH4xZBCV7Y9YISCp4w DQYJKoZIhvcNAQELBQADgYEAF6e8eVAjoZhfyJ+jW5mB0pXa2vr5b7VFQ45voNnc diff --git a/tests/integration/long/ssl/python_driver.crt b/tests/integration/long/ssl/python_driver.crt new file mode 100644 index 0000000000000000000000000000000000000000..0a419f4eb13cea8866961205155cfc16f91c2f7e GIT binary patch literal 947 zcmXqLVqS01#58>YGZP~d6N|{_iR=cvY@Awc9&O)w85y}*84McR47m+B*_cCF*o2uv zgAIiZ1VJ1QVJ_#yoXoWRqP)yRLlFZZkRZD-cW`1}Nuq*tPGV7_p_qXPNR&&M*Cnwe zF}NhLLcudHSRS(1^Tr{GeQS(aL)5RzJ4QVi3@EX<1{YoH*`YiMd< zW@Kt;ZeU<&5hcNIWC#*4FoX&W9;Y@jDj^3WBP#=Q6C*!^K@%evQxhX2L$AlKqdyl_ z?ArKU;kTS5_^v9{bQsa zIg0U195b~P()hh8G4{ed-E209C*_74en1zuW;g#B~x*&@>IE-e1#k#t7+0tZ&_8Ga4mscYPA}}oi1CfzI zI^nL;(I9teMJ-9|{wpPAX6GA%Rj0mv_I%~HMWRmc&MrDtym9%16Nx1WES#&|Pq#=q z^2Sst3+eMtl|QrGBjgyTFGqFCyeDPH6AOhDH7D)R^D^&vqY|kr6?g1{{fn5mJ#1lW z9O3IHuFH(mds9#?8n6CFoMXDoqu;kHkLFmN-8buO!S~}LabJV3-uGHkW&UQ<)|t}k z2|t$Hf6!Rj`YQWivB7lHvVi?1ceFXnIMYLTgyzh$Z#i-I=aS~bo%c8+CvyFoZyOwN zhGz!%yV8Asm3B_hknVfsU&}kulY61|BDwkX2ltClllXbHW!1qFt;LD@w>O;H)pkea N`V%ji<^;BhNdSf*Rxtno literal 0 HcmV?d00001 diff --git 
a/tests/integration/long/ssl/python_driver.jks b/tests/integration/long/ssl/python_driver.jks new file mode 100644 index 0000000000000000000000000000000000000000..9a0fd59f738b77c276f06fc0048a0b52fe64b345 GIT binary patch literal 2306 zcmd6o`8U*$8pmhMUl2=ee@dB9Mx2A2pJ%ngFFpqupzncJdI1r=?# z&f#Ba%M?93!9aIFimfzz#$~&b5}AdIf2O7QGp%hC^k#y1A4Qbw*{J?g7gOx)l71$* zGhtkyj3TVmmwt&|+sapPQ18)Fm*Z6)T^K)Fp);#BN?i_WL_B)z@vY|FgPNDT8iPG_ zYgdx=r?7$Wj^Ia^rh=bxrK(9dpYQ zOjPrdN}sAztJp^0m|Dr0q{xEQaG1c@F|&s}oQNjn{eLkSS1eP;l*eTtM(*fFc$mD2 zTd_N?vb1jIC=WkpWW^5#d#J2UAg>RA%;njPiEB}V8uvb2^ze!ec<>_TD^)b8*OC@+ zitTSRyx88_a3C4y2;TS>d(PV=kBPNWH5w7On&~!j&YZWJd|Z&*WGX1DQe`w+Mrd|0 zTMmJt1;`ksdv5#DYiLidK#LEUi;uIe(ftZ`HU5OKqYC1y1GG072czM^`TXl!8-69z zm|F&ZhFw}i34H{i$#55kI*H!XMRrWL@Vv=rPy5Sie{5-um(w`Lp^7~%G3FTNE=aKF z4ejLlv^(t*_!94cCl-Wva-5n=WqNgPH`@0aDM4MCs@(fW+~y+sJw!%rKNn9~&6Sbt%-y~&_KQfWu8+ww; zJ1pZg*HiwmlU<4NlpQicP@YR-$G%%=<(e%v!_)%=&&LvLQeLHdJpb56aQ>K#r6kU) zB#+{Ib&!N))e3`{x$LPU+s_&gI*1L?{KA@$eu4g^smcK+X&{Fh6hnjCvQnm=I2jF< zySC=T2p>5GtO(#>I|ldqXGR+0mA zHTeT(osl5jkhZq1=+|P~W%`!_%$GQZ&t6fQu1$;U(QieVz-&AJaV+1i!A*rMVJ6^u zP>Y@D=zCiivC{)qBU!a)8jY(9rm>@B@MXgy6rjv?Ij^T0z$S3k?tU8hy{KlH$C z&^nv2rY~yauJ1#$U+BAbyLV)h%l0f4Mv%4#wx1`o2_GL(IG*e=i=ChFNx)mjKz%(I z)kIQSjC$+Xk@ZET+UC06+7%&*LUQ@+)jeMa>Ch3xt?-+x+Ny&y9M8$(^Z1gm@xYjY zt0hCq0`aR~)LX%hr85OjY|deR+mTmk#%Vfx?@$GIw~%(?XYJ zMO`#HpOV^L$;_L*Z5%hMlbkHH=aG+^%Q(p*M7qXDcax&7izeU2AEdfA&q&e(vtI0a z+_$`osoRLcTz)Baa_x?T%1>gptc%@pFex?92mP6_Qeo!&ass`PyjHDu;Wl9wcKvrg za=U5a0RjjFX#!Y~2LOxfMj3<)%*6#o?dI|V2p+gbidD*21Q^WE4FVEVaQpzA2jUFn z5#@4r!ifSRzZ#e*pD8sY&_66PG?0oz0mxsFSCrq08cL_in}$#$sW@=}^^5X}BFw0C zsuP_WBY!&doEA0sD@$kt&SYRO#zykAevA|$ZmKA&OTSW}J@vHp4EE>Eu z%!zAmpB2#$tctYO>76O{H{2aH*hP;|YNqO=@{I-N z;Y|X{<-jtt8I@rvoS*%ajro+d%byfyKX2z_7-%&wCY;O;cijZT8FnYQ6XE1>m8Lg; zH34T`sFr;jFI&{9`|s*^EKZI>VLjbK(nA*?Zx-Ba+Z?b_wHB%`z_GHxtLoa0C~?DF zzw)^TpEEhRy|VjJ3}tBi)LSy8lgC9B=K3JFK9HjIKAb6bR_(n6?1uEp{ybwa ssH7g7Zx~^xEGW;eSxaB`=E?PGO<$;UD<*~oO=dDeTg@A{mK0L{4ZtwgI{*Lx literal 0 HcmV?d00001 
diff --git a/tests/integration/long/ssl/python_driver.key b/tests/integration/long/ssl/python_driver.key new file mode 100644 index 0000000000..afd73b298c --- /dev/null +++ b/tests/integration/long/ssl/python_driver.key @@ -0,0 +1,34 @@ +Bag Attributes + friendlyName: python_driver + localKeyID: 54 69 6D 65 20 31 34 33 35 33 33 33 34 30 34 33 33 32 +Key Attributes: +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,8A0BC9CFBBB36D47 + +J3Rh82LhsNdIdCV4KCp758VIJJnmedwtq/I9oxH5kY4XoUQjfNcvLGlEnbAUD6+N +mYnQ5XPDvD7iC19XvlA9gfaoWERq+zroGEP+e4dX1X5RlT6YQBJpJR8IW4DWngDM +Nv6CuaGFJWMH8QUvKlJyFOPOHBqbhsCRaxg3pOG3RyUFXpGPDV0ySUyp6poHE9KE +pEVif/SdS3AhV2sb4tyBS9sRZdH1eeCN4gY6k9PQWyNViAgUYAG5xWsE4fITa3qY +gisyzbOYU8ue2QvmjPJgieiKPQf+st/ZRV5eQUCdUgAfLEnULGJXRZ5kw7kMXL0X +gLaKFbGxv4pKQCDCZQq4GXIA/nmTy6cme+VPKwq3usm+GdxfdWQJjgG65+AFaut/ +XjGm1fvSQzWuzpesfLy57HMK+bBh1/UKjuQa3wAHtgPtJLtUSW+/qBnQRdBbl20C +dJtJXyyTlX6H8bQBIfBLc4ntUwS8fVd2jsYJRpCBY6HdtpfsZZ5gQnm1Nux4ksKn +rYYx3I/JpChr7AV7Yj/lwc3Zca/VJl16CjyWeRTQEvkl6GK958aIzj73HfXleZc6 +HGVfOgo2BLmOzY0ZCq/Wa4fnURRgrC3SusrT9mjVbID91oNYw4BjMEU53u0uxPC+ +rr6SwG2EUVawGTVK4XZw2DINCPP/wsKqf0xqA+sxArcTN/MEdLUBdf8oDntkj2jG +Oy0kwpjqhSvWo1DqYKZjV/wKT2SS18OMAW+0qplbHw1/FDGWK+OseD8MXwBo06a5 +LWRQXhf0kEXUQ+oNj3eahe/npHiNChR6mEiIbCuE3NAXPPXJNkhMuj2f5EqrOPfZ +jqbNiLfKKx7L5t6B8LXkdKGPqztcFlnB8rRF9Eqa8F4wiEg8MBLrPyxgd/uT+NIz +LdDgvUE+IkCwQoYoCU70ApiEOyQNacuSxwUiVWVyn9CJYXPM4Vlje7GDIDRR5Xp6 +zNf0ktNP46PsRqDlYG9hZWndj4PRaAqtatlEEm37rmyouVBe3rxcbL1b1zsH/p1I +eaGGTyZ8+iEiuEk4gCOmfmYmpE7H/DXlQvtDRblid/bEY64Uietx0HQ5yZwXZYi8 +hb4itke6xkgRQEIXVyQOdU88PEuA5yofEGoXkfdLgtdu3erPrVDc+nQTYrMWNacR +JQljfhAFJdjOw81Yd5PnFHAtxcxzqEkWv0TGQLL1VjJdinhI7q/fIPLJ76FtuGmt +zlxo/Jy1aaUgM/e485+7aoNSGi2/t6zGqGuotdUCO5epgrUHX+9fOJnnrYTG9ixp +FSHTT69y72khnw3eMP8NnOS3Lu+xLEzQHNbUDfB8uyVEX4pyA3FPVVqwIaeJDiPS +2x7Sl5KKwLbqPPKRFRC1qLsN4KcqeXBG+piTLPExdzsLbrU9JZMcaNmSmUabdg20 +SCwIuU2kHEpO7O7yNGeV9m0CGFUaoCAHVG70oXHxpVjAJbtgyoBkiwSxghCxXkfW 
+Mg+1B2k4Gk1WrLjIyasH6p0MLUJ7qLYN+c+wF7ms00F/w04rM6zUpkgnqsazpw6F +weUhpA8qY2vOJN6rsB4byaOUnd33xhAwcY/pIAcjW7UBjNmFMB1DQg== +-----END RSA PRIVATE KEY----- diff --git a/tests/integration/long/ssl/python_driver.pem b/tests/integration/long/ssl/python_driver.pem new file mode 100644 index 0000000000..83556fd9ce --- /dev/null +++ b/tests/integration/long/ssl/python_driver.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIEFPORBzANBgkqhkiG9w0BAQsFADCBhjELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCkNhbGlmb3JuaWExFDASBgNVBAcTC1NhbnRhIENsYXJhMRYwFAYDVQQKEw1EYXRhU3RheCBJ +bmMuMRwwGgYDVQQLExNQeXRob24gRHJpdmVyIFRlc3RzMRYwFAYDVQQDEw1QeXRob24gRHJpdmVy +MCAXDTE1MDYyNTE3MDAxOFoYDzIxMTUwNjAxMTcwMDE4WjCBhjELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCkNhbGlmb3JuaWExFDASBgNVBAcTC1NhbnRhIENsYXJhMRYwFAYDVQQKEw1EYXRhU3RheCBJ +bmMuMRwwGgYDVQQLExNQeXRob24gRHJpdmVyIFRlc3RzMRYwFAYDVQQDEw1QeXRob24gRHJpdmVy +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAjUi6xfmieLqx9yD7HhkB6sjHfbS51xE7 +aaRySjTA1p9mPfPMPPMZ0NIsiDsUlT7Fa+LWZU9cGuJBFg8YxjU5Eij7smFd0J4tawk51KudDdUZ +crALGFC3WY7sEboMl8UHqV+kESPlNm5/JSNSYkNm1TMi9mHcB/Bg3RDpORRW/keMtBSLRxCVjsu6 +GvKN8wuEfU/bTmI9aUjbFRCFunBX6QEJeU44BYEJXNAls+X8szBfVmFHwefatSlh++uu7kY6zAQI +v74PHMZ8w+mWmbjpxEsmSg+uljGCjQHjKTNSFBY9kWWh2LBiTcZuEsQ9DK0J/+1tUa0s5vq6CjUK +XRxwpQIDAQABoyEwHzAdBgNVHQ4EFgQUJwTYG8dcZDt7faalYwCHmG3jp3swDQYJKoZIhvcNAQEL +BQADggEBABtg3SLFUkcbISoZO4/UdHY2z4BTJZXt5uep9qIVQu7NospzsafgyGF0YAQJq0fLhBlB +DVx6IxIvDZUfzKdIVMYJTQh7ZJ7kdsdhcRIhKZK4Lko3iOwkWS0aXsbQP+hcXrwGViYIV6+Rrmle +LuxwexVfJ+wXCJcc4vvbecVsOs2+ms1w98cUXvVS1d9KpHo37LK1mRsnYPik3+CBeYXqa8FzMJc1 +dlC/dNwrCXYJZ1QMEpyaP4TI3fmkg8OJ3glZkQr6nz1TUMwMmAvudb79IrmQKBuO6k99DZFJC6Er +oh6ff8G/F5YY+dWEqsF0KqNhL9uwyrqG3CTX5Eocg2AGkWI= +-----END CERTIFICATE----- diff --git a/tests/integration/long/ssl/python_driver_bad.pem b/tests/integration/long/ssl/python_driver_bad.pem new file mode 100644 index 0000000000..978d6c53f3 --- /dev/null +++ b/tests/integration/long/ssl/python_driver_bad.pem @@ -0,0 +1,19 @@ 
+-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIEFPORBzANBgkqhkiG9w0BAQsFADCBhjELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCkNhbGlmb3JuaWExFDASBgNVBAcTC1NhbnRhIENsYXJhMRYwFAYDVQQKEw1EYXRhU3RheCBJ +bmMuMRwwGgYDVQQLExNQeXRob24gRHJpdmVyIFRlc3RzMRYwFAYDVQQDEw1QeXRob24gRHJpdmVy +MCAXDTE1MDYyNTE3MDAxOFoYDzIxMTUwNjAxMTcwMDE4WjCBhjELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCkNhbGlmb3JuaWExFDASBgNVBAcTC1NhbnRhIENsYXJhMRYwFAYDVQQKEw1EYXRhU3RheCBJ +bmMuMRwwGgYDVQQLExNQeXRob24gRHJpdmVyIFRlc3RzMRYwFAYDVQQDEw1QeXRob24gRHJpdmVy +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAjUi6xfmieLqx9yD7HhkB6sjHfbS51xE7 +aaRySjTA1p9mPfPMPPMZ0NIsiDsUlT7Fa+LWZU9cGuJBFg8YxjU5Eij7smFd0J4tawk51KudDdUZ +crALGFC3WY7sEboMl8UHqV+kESPlNm5/JSNSYkNm1TMi9mHcB/Bg3RDpORRW/keMtBSLRxCVjsu6 +GvKN8wuEfU/bTmI9aUjbFRCFunBX6QEJeU44BYEJXNAls+X8szBfVmFHwefatSlh++uu7kY6zAQI +v74PHMZ8w+mWmbjpxEsmSg+uljGCjQHjKTNSFBY9kWWh2LBiTcZuEsQ9DK0J/+1tUa0s5vq6CjUK +XRxwpQIDAQABoyE666666gNVHQ4EFgQUJwTYG8dcZDt7faalYwCHmG3jp3swDQYJKoZIhvcNAQEL +BQADggEBABtg3SLFUkcbISoZO4/UdHY2z4BTJZXt5uep9qIVQu7NospzsafgyGF0YAQJq0fLhBlB +DVx6IxIvDZUfzKdIVMYJTQh7ZJ7kdsdhcRIhKZK4Lko3iOwkWS0aXsbQP+hcXrwGViYIV6+Rrmle +LuxwexVfJ+wXCJcc4vvbecVsOs2+ms1w98cUXvVS1d9KpHo37LK1mRsnYPik3+CBeYXqa8FzMJc1 +dlC/dNwrCXYJZ1QMEpyaP4TI3fmkg8OJ3glZkQr6nz1TUMwMmAvudb79IrmQKBuO6k99DZFJC6Er +oh6ff8G/F5YY+dWEqsF0KqNhL9uwyrqG3CTX5Eocg2AGkWI= +-----END CERTIFICATE----- diff --git a/tests/integration/long/ssl/python_driver_no_pass.key b/tests/integration/long/ssl/python_driver_no_pass.key new file mode 100644 index 0000000000..8dd14f84f0 --- /dev/null +++ b/tests/integration/long/ssl/python_driver_no_pass.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAjUi6xfmieLqx9yD7HhkB6sjHfbS51xE7aaRySjTA1p9mPfPM +PPMZ0NIsiDsUlT7Fa+LWZU9cGuJBFg8YxjU5Eij7smFd0J4tawk51KudDdUZcrAL +GFC3WY7sEboMl8UHqV+kESPlNm5/JSNSYkNm1TMi9mHcB/Bg3RDpORRW/keMtBSL +RxCVjsu6GvKN8wuEfU/bTmI9aUjbFRCFunBX6QEJeU44BYEJXNAls+X8szBfVmFH +wefatSlh++uu7kY6zAQIv74PHMZ8w+mWmbjpxEsmSg+uljGCjQHjKTNSFBY9kWWh 
+2LBiTcZuEsQ9DK0J/+1tUa0s5vq6CjUKXRxwpQIDAQABAoIBAC3bpYQM+wdk0c79 +DYU/aLfkY5wRxSBhn38yuUYMyWrgYjdJoslFvuNg1MODKbMnpLzX6+8GS0cOmUGn +tMrhC50xYEEOCX1lWiib3gGBkoCi4pevPGqwCFMxaL54PQ4mDc6UFJTbqdJ5Gxva +0yrB5ebdqkN+kASjqU0X6Bt21qXB6BvwAgpIXSX8r+NoH2Z9dumSYD+bOwhXo+/b +FQ1wyLL78tDdlJ8KibwnTv9RtLQbALUinMEHyP+4Gp/t/JnxlcAfvEwggYBxFR1K +5sN8dMFbMZVNqNREXZyWCMQqPbKLhIHPHlNo5pJP7cUh9iVH4QwYNIbOqUza/aUx +z7DIISECgYEAvpAAdDiBExMOELz4+ku5Uk6wmVOMnAK6El4ijOXjJsOB4FB6M0A6 +THXlzLws0YLcoZ3Pm91z20rqmkv1VG+En27uKC1Dgqqd4DOQzMuPoPxzq/q2ozFH +V5U1a0tTmyynr3CFzQUJKLJs1pKKIp6HMiB48JWQc5q6ZaaomEnOiYsCgYEAvczB +Bwwf7oaZGhson1HdcYs5kUm9VkL/25dELUt6uq5AB5jjvfOYd7HatngNRCabUCgE +gcaNfJSwpbOEZ00AxKVSxGmyIP1YAlkVcSdfAPwGO6C1+V4EPHqYUW0AVHOYo7oB +0MCyLT6nSUNiHWyI7qSEwCP03SqyAKA1pDRUVI8CgYBt+bEpYYqsNW0Cn+yYlqcH +Jz6n3h3h03kLLKSH6AwlzOLhT9CWT1TV15ydgWPkLb+ize6Ip087mYq3LWsSJaHG +WUC8kxLJECo4v8mrRzdG0yr2b6SDnebsVsITf89qWGUVzLyLS4Kzp/VECCIMRK0F +ctQZFFffP8ae74WRDddSbQKBgQC7vZ9qEyo6zNUAp8Ck51t+BtNozWIFw7xGP/hm +PXUm11nqqecMa7pzG3BWcaXdtbqHrS3YGMi3ZHTfUxUzAU4zNb0LH+ndC/xURj4Z +cXJeDO01aiDWi5LxJ+snEAT1hGqF+WX2UcVtT741j/urU0KXnBDb5jU92A++4rps +tH5+LQKBgGHtOWD+ffKNw7IrVLhP16GmYoZZ05zh10d1eUa0ifgczjdAsuEH5/Aq +zK7MsDyPcQBH/pOwAcifWGEdXmn9hL6w5dn96ABfa8Qh9nXWrCE2OFD81PDU9Osd +wnwbTKlYWPBwdF7UCseKC7gXkUD6Ls0ADWJvrCI7AfQJv6jj6nnE +-----END RSA PRIVATE KEY----- diff --git a/tests/integration/long/ssl/server_cert.pem b/tests/integration/long/ssl/server_cert.pem new file mode 100644 index 0000000000..7c96b96ade --- /dev/null +++ b/tests/integration/long/ssl/server_cert.pem @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIICbjCCAdegAwIBAgIEP/N06DANBgkqhkiG9w0BAQsFADBqMQswCQYDVQQGEwJURTELMAkGA1UE +CBMCQ0ExFDASBgNVBAcTC1NhbnRhIENsYXJhMREwDwYDVQQKEwhEYXRhU3RheDELMAkGA1UECxMC +VEUxGDAWBgNVBAMTD1BoaWxpcCBUaG9tcHNvbjAeFw0xNDEwMDMxNTQ2NDdaFw0xNTAxMDExNTQ2 +NDdaMGoxCzAJBgNVBAYTAlRFMQswCQYDVQQIEwJDQTEUMBIGA1UEBxMLU2FudGEgQ2xhcmExETAP +BgNVBAoTCERhdGFTdGF4MQswCQYDVQQLEwJURTEYMBYGA1UEAxMPUGhpbGlwIFRob21wc29uMIGf 
+MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQChGDwrhpQR0d+NoqilMgsBlR6A2Dd1oMyIUe42sU4t +N63g5N44Ic4RpTiyaWgnAkP332ok3YAuVbxytwEv2K9HrUSiokAiuinlhhHA8CXTHt/1ItzzWj9u +J3Hneb+5lOkXVTZX7Y+q3aSdpx/HnZqn4i27DtLZF0z3LccWPWRinQIDAQABoyEwHzAdBgNVHQ4E +FgQU9WJpUhgGTBBH4xZBCV7Y9YISCp4wDQYJKoZIhvcNAQELBQADgYEAF6e8eVAjoZhfyJ+jW5mB +0pXa2vr5b7VFQ45voNncGrB3aNbz/AWT7LCJw88+Y5SJITgwN/8o3ZY6Y3MyiqeQYGo9WxDSWb5A +dZWFa03Z+hrVDQuw1r118zIhdS4KYDCQM2JfWY32TwK0MNG/6BO876HfkDpcjCYzq8Gh0gEguOA= +-----END CERTIFICATE----- diff --git a/tests/integration/long/ssl/keystore.jks b/tests/integration/long/ssl/server_keystore.jks similarity index 100% rename from tests/integration/long/ssl/keystore.jks rename to tests/integration/long/ssl/server_keystore.jks diff --git a/tests/integration/long/ssl/server_trust.jks b/tests/integration/long/ssl/server_trust.jks new file mode 100644 index 0000000000000000000000000000000000000000..feb0784a06189e4abb5d763d0bf011590a311c1f GIT binary patch literal 1677 zcmezO_TO6u1_mZL=1fj3E>6r#DN1BuVD!`BoBN-EHA2tSz!Ipa$e@WS&!CC%`T}Mq zMkXc}`_CmW40zc%wc0$|zVk9Nayplu(=bXf%L_X)h&(lkwb!!m4Ugjm%*U1lc}+h zVWEVLcH0!ei}!mMtypTr%{W!A;f8tXf-@a~@60y(>6)*7@Z_F_;yJ;k7Mn6N)R~;W z-_KIH+n^V^r)WE){*CqSYh4yCa!}f(`Ls>&zz5aKa`(R~-T544pQm2P(0Ta0UGkJpMGFJ-{~C9vStS=6buFKekYyV!a4B-1 zL+RAkY~P!|q^|OEZ@9L%^s|v-sUBB?!35)^_{iRG{!Cj8F7AIJyyyMG`xC5Uden?p zA6$5eQDMgeaJu9zs4U6I&x=nf$}CGQ0;fxU^FDaGWZrDh#JnDuHm5_=rO4-r>}Y;5 zXl#R}DqzA44K@@u5Co+v4q-0m#GK5u{GzviJ%s>Q^KzU&aRKYVZS=`RIzL0cZJ__l8moT9IxH7^SYpQ=8_^W zlLOc0r`di!WAj<^!X=#!YmupTN3$PYOZAVDdgLg^FLBJ&Qb^2X_BpmkS4_)A94No^pnS!O*@|8Nv4e8@CDc04s%a$fHw9m+W zyu2DY5JCA06n4@Hca@F?xl1c*Nm}<`DJe5M-w>=i_3g9gE59ugb$WMp(W&B%%O9Lb zEJ&=L*$0aarkj=p>@T^a%~{5o9>OCuXO?}-iMv0SG#~D~ z#~C@1>(_kS;D9qcGq~TC?)$5>bApC+-z)!G-ie;v3$+)?&96VWUwoRx&#Nt~4wh&w tPSn4>;nc3SJ1W 5: + raise RuntimeError("Failed to connect to SSL cluster after 5 attempts") + try: + cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': abs_path_ca_cert_path, + 'ssl_version': 
ssl.PROTOCOL_TLSv1, + 'keyfile': abs_driver_keyfile, + 'certfile': abs_driver_certfile}) + + session = cluster.connect() + break + except Exception: + ex_type, ex, tb = sys.exc_info() + log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) + del tb + tries += 1 + + # attempt a few simple commands. + + insert_keyspace = """CREATE KEYSPACE ssltest + WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'} + """ + statement = SimpleStatement(insert_keyspace) + statement.consistency_level = 3 + session.execute(statement) + + drop_keyspace = "DROP KEYSPACE ssltest" + statement = SimpleStatement(drop_keyspace) + statement.consistency_level = ConsistencyLevel.ANY + session.execute(statement) + + cluster.shutdown() + + def test_cannot_connect_without_client_auth(self): + """ + Test to validate that we cannot connect without client auth. + + This test will omit the keys/certs needed to preform client authentication. It will then attempt to connect + to a server that has client authentication enabled. + + @since 2.7.0 + @expected_result The client will throw an exception on connect + + @test_category connection:ssl + """ + + abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) + cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': abs_path_ca_cert_path, + 'ssl_version': ssl.PROTOCOL_TLSv1}) + # attempt to connect and expect an exception + + with self.assertRaises(NoHostAvailable) as context: + cluster.connect() + + def test_cannot_connect_with_bad_client_auth(self): + """ + Test to validate that we cannot connect with invalid client auth. + + This test will use bad keys/certs to preform client authentication. It will then attempt to connect + to a server that has client authentication enabled. 
+ + + @since 2.7.0 + @expected_result The client will throw an exception on connect + + @test_category connection:ssl + """ + + # Setup absolute paths to key/cert files + abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS) + abs_driver_keyfile = os.path.abspath(DRIVER_KEYFILE) + abs_driver_certfile = os.path.abspath(DRIVER_CERTFILE_BAD) + + cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': abs_path_ca_cert_path, + 'ssl_version': ssl.PROTOCOL_TLSv1, + 'keyfile': abs_driver_keyfile, + 'certfile': abs_driver_certfile}) + with self.assertRaises(NoHostAvailable) as context: + cluster.connect() \ No newline at end of file From 04bbff3d3b5666636de94edb9c33549bb0043af6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Tue, 21 Jul 2015 14:54:24 -0500 Subject: [PATCH 1588/3726] Unlock while yielding execute concurrent generator results Fixes an issue where the event thread could be held up on the executor lock while the client thread waits for a paged result to return. --- cassandra/concurrent.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index aba3045605..7c0d3fd7e1 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -166,9 +166,11 @@ def _results(self): self._condition.wait() while self._results_queue and self._results_queue[0][0] == self._current: _, res = heappop(self._results_queue) + self._condition.release() if self._fail_fast and not res[0]: self._raise(res[1]) yield res + self._condition.acquire() self._current += 1 From 791f44c6ddd7eac44013e887e38c6f21de4d5dd7 Mon Sep 17 00:00:00 2001 From: GregBestland Date: Mon, 20 Jul 2015 14:19:47 -0500 Subject: [PATCH 1589/3726] Adding unit and integration tests for PYTHON-123. 
--- tests/integration/standard/test_concurrent.py | 87 ++++++- tests/unit/test_concurrent.py | 227 ++++++++++++++++++ 2 files changed, 310 insertions(+), 4 deletions(-) create mode 100644 tests/unit/test_concurrent.py diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index 9a82338348..45b736132c 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -50,11 +50,11 @@ def setUpClass(cls): def tearDownClass(cls): cls.cluster.shutdown() - def execute_concurrent_helper(self, session, query): + def execute_concurrent_helper(self, session, query, results_generator=False): count = 0 while count < 100: try: - return execute_concurrent(session, query) + return execute_concurrent(session, query, results_generator=False) except (ReadTimeout, WriteTimeout, OperationTimedOut, ReadFailure, WriteFailure): ex_type, ex, tb = sys.exc_info() log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) @@ -63,11 +63,11 @@ def execute_concurrent_helper(self, session, query): raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query)) - def execute_concurrent_args_helper(self, session, query, params): + def execute_concurrent_args_helper(self, session, query, params, results_generator=False): count = 0 while count < 100: try: - return execute_concurrent_with_args(session, query, params) + return execute_concurrent_with_args(session, query, params, results_generator=results_generator) except (ReadTimeout, WriteTimeout, OperationTimedOut, ReadFailure, WriteFailure): ex_type, ex, tb = sys.exc_info() log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) @@ -120,6 +120,40 @@ def test_execute_concurrent_with_args(self): self.assertEqual(num_statements, len(results)) self.assertEqual([(True, [(i,)]) for i in range(num_statements)], results) + def 
test_execute_concurrent_with_args_generator(self): + """ + Test to validate that generator based results are surfaced correctly + + Repeatedly inserts data into a a table and attempts to query it. It then validates that the + results are returned in the order expected + + @since 2.7.0 + @jira_ticket PYTHON-123 + @expected_result all data should be returned in order. + + @test_category queries:async + """ + for num_statements in (0, 1, 2, 7, 10, 99, 100, 101, 199, 200, 201): + statement = SimpleStatement( + "INSERT INTO test3rf.test (k, v) VALUES (%s, %s)", + consistency_level=ConsistencyLevel.QUORUM) + parameters = [(i, i) for i in range(num_statements)] + + results = self.execute_concurrent_args_helper(self.session, statement, parameters, results_generator=True) + for result in results: + self.assertEqual((True, None), result) + + # read + statement = SimpleStatement( + "SELECT v FROM test3rf.test WHERE k=%s", + consistency_level=ConsistencyLevel.QUORUM) + parameters = [(i, ) for i in range(num_statements)] + + results = self.execute_concurrent_args_helper(self.session, statement, parameters, results_generator=True) + for i in range(num_statements): + result = results.next() + self.assertEqual((True, [(i,)]), result) + def test_execute_concurrent_paged_result(self): if PROTOCOL_VERSION < 2: raise unittest.SkipTest( @@ -150,6 +184,51 @@ def test_execute_concurrent_paged_result(self): self.assertIsInstance(result, PagedResult) self.assertEqual(num_statements, sum(1 for _ in result)) + def test_execute_concurrent_paged_result_generator(self): + """ + Test to validate that generator based results are surfaced correctly when paging is used + + Inserts data into a a table and attempts to query it. It then validates that the + results are returned as expected (no order specified) + + @since 2.7.0 + @jira_ticket PYTHON-123 + @expected_result all data should be returned in order. 
+ + @test_category paging + """ + if PROTOCOL_VERSION < 2: + raise unittest.SkipTest( + "Protocol 2+ is required for Paging, currently testing against %r" + % (PROTOCOL_VERSION,)) + + num_statements = 201 + statement = SimpleStatement( + "INSERT INTO test3rf.test (k, v) VALUES (%s, %s)", + consistency_level=ConsistencyLevel.QUORUM) + parameters = [(i, i) for i in range(num_statements)] + + results = self.execute_concurrent_args_helper(self.session, statement, parameters, results_generator=True) + self.assertEqual(num_statements, sum(1 for _ in results)) + + # read + statement = SimpleStatement( + "SELECT * FROM test3rf.test LIMIT %s", + consistency_level=ConsistencyLevel.QUORUM, + fetch_size=int(num_statements / 2)) + parameters = [(i, ) for i in range(num_statements)] + + paged_results_gen = self.execute_concurrent_args_helper(self.session, statement, [(num_statements,)], results_generator=True) + + # iterate over all the result and make sure we find the correct number. + found_results = 0 + for result_tuple in paged_results_gen: + paged_result = result_tuple[1] + for _ in paged_result: + found_results += 1 + + self.assertEqual(found_results, num_statements) + def test_first_failure(self): statements = cycle(("INSERT INTO test3rf.test (k, v) VALUES (%s, %s)", )) parameters = [(i, i) for i in range(100)] diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py new file mode 100644 index 0000000000..5e23f584a0 --- /dev/null +++ b/tests/unit/test_concurrent.py @@ -0,0 +1,227 @@ +# Copyright 2013-2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa +from itertools import cycle +from mock import Mock +import time +import threading +from six.moves.queue import PriorityQueue + +from cassandra.concurrent import execute_concurrent, execute_concurrent_with_args + + +class MockResponseResponseFuture(): + """ + This is a mock ResponseFuture. It is used to allow us to hook into the underlying session + and invoke callback with various timing. + """ + + # a list pending callbacks, these will be prioritized in reverse or normal orderd + pending_callbacks = PriorityQueue() + + def __init__(self, reverse): + + # if this is true invoke callback in the reverse order then what they were insert + self.reverse = reverse + # hardcoded to avoid paging logic + self.has_more_pages = False + + if(reverse): + self.priority = 100 + else: + self.priority = 0 + + def add_callback(self, fn, *args, **kwargs): + """ + This is used to add a callback our pending list of callbacks. 
+ If reverse is specified we will invoke the callback in the opposite order that we added it + """ + time_added = time.time() + self.pending_callbacks.put((self.priority, (fn, args, kwargs, time_added))) + if not reversed: + self.priority += 1 + else: + self.priority -= 1 + + def add_callbacks(self, callback, errback, + callback_args=(), callback_kwargs=None, + errback_args=(), errback_kwargs=None): + + self.add_callback(callback, *callback_args, **(callback_kwargs or {})) + + def get_next_callback(self): + return self.pending_callbacks.get() + + def has_next_callback(self): + return not self.pending_callbacks.empty() + + def has_more_pages(self): + return False + + def clear_callbacks(self): + return + + +class TimedCallableInvoker(threading.Thread): + """ + This is a local thread which is runs and invokes all the callbacks on the pending callback queue. + The slowdown flag can used to invoke random slowdowns in our simulate queries. + """ + def __init__(self, handler, slowdown=False): + super(TimedCallableInvoker, self).__init__() + self.slowdown = slowdown + self._stop = threading.Event() + self.handler = handler + + def stop(self): + self._stop.set() + + def stopped(self): + return self._stop.isSet() + + def run(self): + while(not self.stopped()): + if(self.handler.has_next_callback()): + pending_callback = self.handler.get_next_callback() + priority_num = pending_callback[0] + if (priority_num % 10) == 0 and self.slowdown: + self._stop.wait(.1) + callback_args = pending_callback[1] + fn, args, kwargs, time_added = callback_args + fn(time_added, *args, **kwargs) + self._stop.wait(.001) + return + + +class ConcurrencyTest((unittest.TestCase)): + + def test_results_ordering_forward(self): + """ + This tests the ordering of our various concurrent generator class ConcurrentExecutorListResults + when queries complete in the order they were executed. 
+ """ + self.insert_and_validate_list_results(False, False) + + def test_results_ordering_reverse(self): + """ + This tests the ordering of our various concurrent generator class ConcurrentExecutorListResults + when queries complete in the reverse order they were executed. + """ + self.insert_and_validate_list_results(True, False) + + def test_results_ordering_forward_slowdown(self): + """ + This tests the ordering of our various concurrent generator class ConcurrentExecutorListResults + when queries complete in the order they were executed, with slow queries mixed in. + """ + self.insert_and_validate_list_results(False, True) + + def test_results_ordering_reverse_slowdown(self): + """ + This tests the ordering of our various concurrent generator class ConcurrentExecutorListResults + when queries complete in the reverse order they were executed, with slow queries mixed in. + """ + self.insert_and_validate_list_results(True, True) + + def test_results_ordering_forward_generator(self): + """ + This tests the ordering of our various concurrent generator class ConcurrentExecutorGenResults + when queries complete in the order they were executed. + """ + self.insert_and_validate_list_generator(False, False) + + def test_results_ordering_reverse_generator(self): + """ + This tests the ordering of our various concurrent generator class ConcurrentExecutorGenResults + when queries complete in the reverse order they were executed. + """ + self.insert_and_validate_list_generator(True, False) + + def test_results_ordering_forward_generator_slowdown(self): + """ + This tests the ordering of our various concurrent generator class ConcurrentExecutorGenResults + when queries complete in the order they were executed, with slow queries mixed in. 
+ """ + self.insert_and_validate_list_generator(False, True) + + def test_results_ordering_reverse_generator_slowdown(self): + """ + This tests the ordering of our various concurrent generator class ConcurrentExecutorGenResults + when queries complete in the reverse order they were executed, with slow queries mixed in. + """ + self.insert_and_validate_list_generator(True, True) + + def insert_and_validate_list_results(self, reverse, slowdown): + """ + This utility method will execute submit various statements for execution using the ConcurrentExecutorListResults, + then invoke a separate thread to execute the callback associated with the futures registered + for those statements. The parameters will toggle various timing, and ordering changes. + Finally it will validate that the results were returned in the order they were submitted + :param reverse: Execute the callbacks in the opposite order that they were submitted + :param slowdown: Cause intermittent queries to perform slowly + """ + our_handler = MockResponseResponseFuture(reverse=reverse) + mock_session = Mock() + statements_and_params = zip(cycle(["INSERT INTO test3rf.test (k, v) VALUES (%s, 0)"]), + [(i, ) for i in range(100)]) + mock_session.execute_async.return_value = our_handler + + t = TimedCallableInvoker(our_handler, slowdown=slowdown) + t.start() + results = execute_concurrent(mock_session, statements_and_params) + + while(not our_handler.pending_callbacks.empty()): + time.sleep(.01) + t.stop() + self.validate_result_ordering(results) + + def insert_and_validate_list_generator(self, reverse, slowdown): + """ + This utility method will execute submit various statements for execution using the ConcurrentExecutorGenResults, + then invoke a separate thread to execute the callback associated with the futures registered + for those statements. The parameters will toggle various timing, and ordering changes. 
+ Finally it will validate that the results were returned in the order they were submitted + :param reverse: Execute the callbacks in the opposite order that they were submitted + :param slowdown: Cause intermittent queries to perform slowly + """ + our_handler = MockResponseResponseFuture(reverse=reverse) + mock_session = Mock() + statements_and_params = zip(cycle(["INSERT INTO test3rf.test (k, v) VALUES (%s, 0)"]), + [(i, ) for i in range(100)]) + mock_session.execute_async.return_value = our_handler + + t = TimedCallableInvoker(our_handler, slowdown=slowdown) + t.start() + results = execute_concurrent(mock_session, statements_and_params, results_generator=True) + + self.validate_result_ordering(results) + t.stop() + + def validate_result_ordering(self, results): + """ + This method will validate that the timestamps returned from the result are in order. This indicates that the + results were returned in the order they were submitted for execution + :param results: + """ + last_time_added = 0 + for result in results: + current_time_added = result[1] + self.assertLess(last_time_added, current_time_added) + last_time_added = current_time_added + From 17f2bc13c2fda731e9de8217790730b151f962ec Mon Sep 17 00:00:00 2001 From: Kevin Deldycke Date: Wed, 22 Jul 2015 15:10:54 +0200 Subject: [PATCH 1590/3726] Unit-tests CQLengine's previous value tracking. Test previous_value of an already-persisted object as well as one that was just instanciated. Refs PYTHON-348. 
--- .../cqlengine/model/test_model_io.py | 74 +++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 3ba2aa9711..b6f76599e7 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -347,6 +347,80 @@ def test_get_changed_columns(self): self.instance.save() assert self.instance.get_changed_columns() == [] + def test_previous_value_tracking_of_persisted_instance(self): + # Check initial internal states. + assert self.instance.get_changed_columns() == [] + assert self.instance._values['count'].previous_value == 0 + + # Change value and check internal states. + self.instance.count = 1 + assert self.instance.get_changed_columns() == ['count'] + assert self.instance._values['count'].previous_value == 0 + + # Internal states should be updated on save. + self.instance.save() + assert self.instance.get_changed_columns() == [] + assert self.instance._values['count'].previous_value == 1 + + # Change value twice. + self.instance.count = 2 + assert self.instance.get_changed_columns() == ['count'] + assert self.instance._values['count'].previous_value == 1 + self.instance.count = 3 + assert self.instance.get_changed_columns() == ['count'] + assert self.instance._values['count'].previous_value == 1 + + # Internal states updated on save. + self.instance.save() + assert self.instance.get_changed_columns() == [] + assert self.instance._values['count'].previous_value == 3 + + # Change value and reset it. + self.instance.count = 2 + assert self.instance.get_changed_columns() == ['count'] + assert self.instance._values['count'].previous_value == 3 + self.instance.count = 3 + assert self.instance.get_changed_columns() == [] + assert self.instance._values['count'].previous_value == 3 + + # Nothing to save: values in initial conditions. 
+ self.instance.save() + assert self.instance.get_changed_columns() == [] + assert self.instance._values['count'].previous_value == 3 + + def test_previous_value_tracking_on_instanciation(self): + self.instance = TestMultiKeyModel( + partition=random.randint(0, 1000), + cluster=random.randint(0, 1000), + count=0, + text='happy') + + # Columns of instances not persisted yet should be marked as changed. + assert set(self.instance.get_changed_columns()) == set([ + 'partition', 'cluster', 'count', 'text']) + assert self.instance._values['partition'].previous_value is None + assert self.instance._values['cluster'].previous_value is None + assert self.instance._values['count'].previous_value is None + assert self.instance._values['text'].previous_value is None + + # Value changes doesn't affect internal states. + self.instance.count = 1 + assert 'count' in self.instance.get_changed_columns() + assert self.instance._values['count'].previous_value is None + self.instance.count = 2 + assert 'count' in self.instance.get_changed_columns() + assert self.instance._values['count'].previous_value is None + + # Value reset is properly tracked. 
+ self.instance.count = None + assert 'count' not in self.instance.get_changed_columns() + assert self.instance._values['count'].previous_value == None + + self.instance.save() + assert self.instance.get_changed_columns() == [] + assert self.instance._values['count'].previous_value == None + assert self.instance.count is None + class TestCanUpdate(BaseCassEngTestCase): From 03d20d28cdb04f64035165a08bb93c203bc1a4ba Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 22 Jul 2015 11:58:49 -0500 Subject: [PATCH 1591/3726] don't build embedded extensions for gevent in tox config was failing on these for some reason also, not bothering with eventlet dep since it can't run without monkey patching --- tox.ini | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tox.ini b/tox.ini index ab2b18a16f..0f97b2416f 100644 --- a/tox.ini +++ b/tox.ini @@ -13,10 +13,11 @@ deps = {[base]deps} blist {env:CYTHON_DEP:} py26: unittest2 - py{26,27}: twisted - py{26,27}: eventlet py{26,27}: gevent + twisted setenv = USE_CASS_EXTERNAL=1 + LIBEV_EMBED=0 + CARES_EMBED=0 changedir = {envtmpdir} commands = nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/unit/ nosetests --verbosity=2 --no-path-adjustment {toxinidir}/tests/integration/cqlengine From 94b8f4d15dd6def3d600e1e6a329a4f81d3984c5 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Wed, 22 Jul 2015 14:48:33 -0500 Subject: [PATCH 1592/3726] doc: Add smallint, tinyint to getting started types table --- docs/getting_started.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index fa5cda32ce..9e4384ce2f 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -212,6 +212,8 @@ following way: | | ``int`` | | ``int`` | | | ``long`` | | ``bigint`` | | | | ``varint`` | + | | | ``smallint`` | + | | | ``tinyint`` | | | | ``counter`` | +--------------------+-------------------------+ | ``decimal.Decimal``| ``decimal`` | From 
33e9a99627878e448b8ca40dce71c8a61cb071c8 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 17 Jul 2015 13:34:48 -0500 Subject: [PATCH 1593/3726] Remove unused CassandraType init, validate --- cassandra/cqltypes.py | 61 +------------- tests/unit/test_types.py | 150 +--------------------------------- tests/unit/test_util_types.py | 147 +++++++++++++++++++++++++++++++++ 3 files changed, 149 insertions(+), 209 deletions(-) create mode 100644 tests/unit/test_util_types.py diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 77fc2b911b..2d858e52df 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -213,21 +213,9 @@ class _CassandraType(object): of EmptyValue) will be returned. """ - def __init__(self, val): - self.val = self.validate(val) - def __repr__(self): return '<%s( %r )>' % (self.cql_parameterized_type(), self.val) - @staticmethod - def validate(val): - """ - Called to transform an input value into one of a suitable type - for this class. As an example, the BooleanType class uses this - to convert an incoming value to True or False. 
- """ - return val - @classmethod def from_binary(cls, byts, protocol_version): """ @@ -358,10 +346,6 @@ class BytesType(_CassandraType): typename = 'blob' empty_binary_ok = True - @staticmethod - def validate(val): - return bytearray(val) - @staticmethod def serialize(val, protocol_version): return six.binary_type(val) @@ -370,10 +354,6 @@ def serialize(val, protocol_version): class DecimalType(_CassandraType): typename = 'decimal' - @staticmethod - def validate(val): - return Decimal(val) - @staticmethod def deserialize(byts, protocol_version): scale = int32_unpack(byts[:4]) @@ -412,10 +392,6 @@ def serialize(uuid, protocol_version): class BooleanType(_CassandraType): typename = 'boolean' - @staticmethod - def validate(val): - return bool(val) - @staticmethod def deserialize(byts, protocol_version): return bool(int8_unpack(byts)) @@ -556,14 +532,9 @@ class CounterColumnType(LongType): class DateType(_CassandraType): typename = 'timestamp' - @classmethod - def validate(cls, val): - if isinstance(val, six.string_types): - val = cls.interpret_datestring(val) - return val - @staticmethod def interpret_datestring(val): + # not used internally. deprecate? if val[-5] in ('+', '-'): offset = (int(val[-4:-2]) * 3600 + int(val[-2:]) * 60) * int(val[-5] + '1') val = val[:-5] @@ -579,9 +550,6 @@ def interpret_datestring(val): else: raise ValueError("can't interpret %r as a date" % (val,)) - def my_timestamp(self): - return self.val - @staticmethod def deserialize(byts, protocol_version): timestamp = int64_unpack(byts) / 1000.0 @@ -633,12 +601,6 @@ class SimpleDateType(_CassandraType): # range (2^31). 
EPOCH_OFFSET_DAYS = 2 ** 31 - @classmethod - def validate(cls, val): - if not isinstance(val, util.Date): - val = util.Date(val) - return val - @staticmethod def deserialize(byts, protocol_version): days = uint32_unpack(byts) - SimpleDateType.EPOCH_OFFSET_DAYS @@ -668,12 +630,6 @@ def serialize(byts, protocol_version): class TimeType(_CassandraType): typename = 'time' - @classmethod - def validate(cls, val): - if not isinstance(val, util.Time): - val = util.Time(val) - return val - @staticmethod def deserialize(byts, protocol_version): return util.Time(int64_unpack(byts)) @@ -709,11 +665,6 @@ class VarcharType(UTF8Type): class _ParameterizedType(_CassandraType): - def __init__(self, val): - if not self.subtypes: - raise ValueError("%s type with no parameters can't be instantiated" % (self.typename,)) - _CassandraType.__init__(self, val) - @classmethod def deserialize(cls, byts, protocol_version): if not cls.subtypes: @@ -730,11 +681,6 @@ def serialize(cls, val, protocol_version): class _SimpleParameterizedType(_ParameterizedType): - @classmethod - def validate(cls, val): - subtype, = cls.subtypes - return cls.adapter([subtype.validate(subval) for subval in val]) - @classmethod def deserialize_safe(cls, byts, protocol_version): subtype, = cls.subtypes @@ -787,11 +733,6 @@ class MapType(_ParameterizedType): typename = 'map' num_subtypes = 2 - @classmethod - def validate(cls, val): - key_type, value_type = cls.subtypes - return dict((key_type.validate(k), value_type.validate(v)) for (k, v) in six.iteritems(val)) - @classmethod def deserialize_safe(cls, byts, protocol_version): key_type, value_type = cls.subtypes diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 77667705d0..18d40956c7 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -105,11 +105,6 @@ def test_lookup_casstype(self): self.assertRaises(ValueError, lookup_casstype, 'AsciiType~') - # TODO: Do a few more tests - # "I would say some parameterized and nested types 
would be good to test, - # like "MapType(AsciiType, IntegerType)" and "ReversedType(AsciiType)" - self.assertEqual(str(lookup_casstype(BooleanType(True))), str(BooleanType(True))) - def test_casstype_parameterized(self): self.assertEqual(LongType.cass_parameterized_type_with(()), 'LongType') self.assertEqual(LongType.cass_parameterized_type_with((), full=True), 'org.apache.cassandra.db.marshal.LongType') @@ -125,134 +120,7 @@ def test_datetype_from_string(self): # Ensure all formats can be parsed, without exception for format in cassandra.cqltypes.cql_timestamp_formats: date_string = str(datetime.datetime.now().strftime(format)) - cassandra.cqltypes.DateType(date_string) - - def test_simpledate(self): - """ - Test cassandra.cqltypes.SimpleDateType() construction - """ - Date = cassandra.util.Date - # from datetime - expected_dt = datetime.datetime(1492, 10, 12, 1, 1) - expected_date = Date(expected_dt) - self.assertEqual(str(expected_date), '1492-10-12') - - # from string - sd = SimpleDateType('1492-10-12') - self.assertEqual(sd.val, expected_date) - sd = SimpleDateType('+1492-10-12') - self.assertEqual(sd.val, expected_date) - - # Date - sd = SimpleDateType(expected_date) - self.assertEqual(sd.val, expected_date) - - # date - sd = SimpleDateType(datetime.date(expected_dt.year, expected_dt.month, expected_dt.day)) - self.assertEqual(sd.val, expected_date) - - # days - sd = SimpleDateType(0) - self.assertEqual(sd.val, Date(datetime.date(1970, 1, 1))) - sd = SimpleDateType(-1) - self.assertEqual(sd.val, Date(datetime.date(1969, 12, 31))) - sd = SimpleDateType(1) - self.assertEqual(sd.val, Date(datetime.date(1970, 1, 2))) - # limits - min_builtin = Date(datetime.date(1, 1, 1)) - max_builtin = Date(datetime.date(9999, 12, 31)) - self.assertEqual(SimpleDateType(min_builtin.days_from_epoch).val, min_builtin) - self.assertEqual(SimpleDateType(max_builtin.days_from_epoch).val, max_builtin) - # just proving we can construct with on offset outside buildin range - 
self.assertEqual(SimpleDateType(min_builtin.days_from_epoch - 1).val.days_from_epoch, - min_builtin.days_from_epoch - 1) - self.assertEqual(SimpleDateType(max_builtin.days_from_epoch + 1).val.days_from_epoch, - max_builtin.days_from_epoch + 1) - - # no contruct - self.assertRaises(ValueError, SimpleDateType, '-1999-10-10') - self.assertRaises(TypeError, SimpleDateType, 1.234) - - # str - date_str = '2015-03-16' - self.assertEqual(str(Date(date_str)), date_str) - # out of range - self.assertEqual(str(Date(2932897)), '2932897') - self.assertEqual(repr(Date(1)), 'Date(1)') - - # eq other types - self.assertEqual(Date(1234), 1234) - self.assertEqual(Date(1), datetime.date(1970, 1, 2)) - self.assertFalse(Date(2932897) == datetime.date(9999, 12, 31)) # date can't represent year > 9999 - self.assertEqual(Date(2932897), 2932897) - - def test_time(self): - """ - Test cassandra.cqltypes.TimeType() construction - """ - Time = cassandra.util.Time - one_micro = 1000 - one_milli = 1000 * one_micro - one_second = 1000 * one_milli - one_minute = 60 * one_second - one_hour = 60 * one_minute - - # from strings - tt = TimeType('00:00:00.000000001') - self.assertEqual(tt.val, 1) - tt = TimeType('00:00:00.000001') - self.assertEqual(tt.val, one_micro) - tt = TimeType('00:00:00.001') - self.assertEqual(tt.val, one_milli) - tt = TimeType('00:00:01') - self.assertEqual(tt.val, one_second) - tt = TimeType('00:01:00') - self.assertEqual(tt.val, one_minute) - tt = TimeType('01:00:00') - self.assertEqual(tt.val, one_hour) - tt = TimeType('01:00:00.') - self.assertEqual(tt.val, one_hour) - - tt = TimeType('23:59:59.1') - tt = TimeType('23:59:59.12') - tt = TimeType('23:59:59.123') - tt = TimeType('23:59:59.1234') - tt = TimeType('23:59:59.12345') - - tt = TimeType('23:59:59.123456') - self.assertEqual(tt.val, 23 * one_hour + 59 * one_minute + 59 * one_second + 123 * one_milli + 456 * one_micro) - - tt = TimeType('23:59:59.1234567') - self.assertEqual(tt.val, 23 * one_hour + 59 * one_minute + 
59 * one_second + 123 * one_milli + 456 * one_micro + 700) - - tt = TimeType('23:59:59.12345678') - self.assertEqual(tt.val, 23 * one_hour + 59 * one_minute + 59 * one_second + 123 * one_milli + 456 * one_micro + 780) - - tt = TimeType('23:59:59.123456789') - self.assertEqual(tt.val, 23 * one_hour + 59 * one_minute + 59 * one_second + 123 * one_milli + 456 * one_micro + 789) - - # from int - tt = TimeType(12345678) - self.assertEqual(tt.val, 12345678) - - # from time - expected_time = datetime.time(12, 1, 2, 3) - tt = TimeType(expected_time) - self.assertEqual(tt.val, expected_time) - - # util.Time self equality - self.assertEqual(Time(1234), Time(1234)) - - # str - time_str = '12:13:14.123456789' - self.assertEqual(str(Time(time_str)), time_str) - self.assertEqual(repr(Time(1)), 'Time(1)') - - # no construct - self.assertRaises(ValueError, TimeType, '1999-10-10 11:11:11.1234') - self.assertRaises(TypeError, TimeType, 1.234) - self.assertRaises(ValueError, TimeType, 123456789000000) - self.assertRaises(TypeError, TimeType, datetime.datetime(2004, 12, 23, 11, 11, 1)) + cassandra.cqltypes.DateType.interpret_datestring(date_string) def test_cql_typename(self): """ @@ -309,12 +177,6 @@ class BarType(FooType): def test_empty_value(self): self.assertEqual(str(EmptyValue()), 'EMPTY') - def test_cassandratype_base(self): - cassandra_type = _CassandraType('randomvaluetocheck') - self.assertEqual(cassandra_type.val, 'randomvaluetocheck') - self.assertEqual(cassandra_type.validate('randomvaluetocheck2'), 'randomvaluetocheck2') - self.assertEqual(cassandra_type.val, 'randomvaluetocheck') - def test_datetype(self): now_time_seconds = time.time() now_datetime = datetime.datetime.utcfromtimestamp(now_time_seconds) @@ -325,14 +187,6 @@ def test_datetype(self): # same results serialized self.assertEqual(DateType.serialize(now_datetime, 0), DateType.serialize(now_timestamp, 0)) - # from timestamp - date_type = DateType(now_timestamp) - self.assertEqual(date_type.my_timestamp(), 
now_timestamp) - - # from datetime object - date_type = DateType(now_datetime) - self.assertEqual(date_type.my_timestamp(), now_datetime) - # deserialize # epoc expected = 0 @@ -346,8 +200,6 @@ def test_datetype(self): expected = -770172256 self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime(1945, 8, 5, 23, 15, 44)) - self.assertRaises(ValueError, date_type.interpret_datestring, 'fakestring') - # work around rounding difference among Python versions (PYTHON-230) expected = 1424817268.274 self.assertEqual(DateType.deserialize(int64_pack(int(1000 * expected)), 0), datetime.datetime(2015, 2, 24, 22, 34, 28, 274000)) diff --git a/tests/unit/test_util_types.py b/tests/unit/test_util_types.py new file mode 100644 index 0000000000..7cf6c1f4ba --- /dev/null +++ b/tests/unit/test_util_types.py @@ -0,0 +1,147 @@ +# Copyright 2013-2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import datetime + +from cassandra.util import Date, Time + + +class DateTests(unittest.TestCase): + + def test_from_datetime(self): + expected_date = datetime.date(1492, 10, 12) + d = Date(expected_date) + self.assertEqual(str(d), str(expected_date)) + + def test_from_string(self): + expected_date = datetime.date(1492, 10, 12) + d = Date(expected_date) + sd = Date('1492-10-12') + self.assertEqual(sd, d) + sd = Date('+1492-10-12') + self.assertEqual(sd, d) + + def test_from_date(self): + expected_date = datetime.date(1492, 10, 12) + d = Date(expected_date) + self.assertEqual(d.date(), expected_date) + + def test_from_days(self): + sd = Date(0) + self.assertEqual(sd, Date(datetime.date(1970, 1, 1))) + sd = Date(-1) + self.assertEqual(sd, Date(datetime.date(1969, 12, 31))) + sd = Date(1) + self.assertEqual(sd, Date(datetime.date(1970, 1, 2))) + + def test_limits(self): + min_builtin = Date(datetime.date(1, 1, 1)) + max_builtin = Date(datetime.date(9999, 12, 31)) + self.assertEqual(Date(min_builtin.days_from_epoch), min_builtin) + self.assertEqual(Date(max_builtin.days_from_epoch), max_builtin) + # just proving we can construct with on offset outside buildin range + self.assertEqual(Date(min_builtin.days_from_epoch - 1).days_from_epoch, + min_builtin.days_from_epoch - 1) + self.assertEqual(Date(max_builtin.days_from_epoch + 1).days_from_epoch, + max_builtin.days_from_epoch + 1) + + def test_invalid_init(self): + self.assertRaises(ValueError, Date, '-1999-10-10') + self.assertRaises(TypeError, Date, 1.234) + + def test_str(self): + date_str = '2015-03-16' + self.assertEqual(str(Date(date_str)), date_str) + + def test_out_of_range(self): + self.assertEqual(str(Date(2932897)), '2932897') + self.assertEqual(repr(Date(1)), 'Date(1)') + + def test_equals(self): + self.assertEqual(Date(1234), 1234) + self.assertEqual(Date(1), datetime.date(1970, 1, 2)) + self.assertFalse(Date(2932897) == 
datetime.date(9999, 12, 31)) # date can't represent year > 9999 + self.assertEqual(Date(2932897), 2932897) + + +class TimeTests(unittest.TestCase): + + def test_units_from_string(self): + one_micro = 1000 + one_milli = 1000 * one_micro + one_second = 1000 * one_milli + one_minute = 60 * one_second + one_hour = 60 * one_minute + + tt = Time('00:00:00.000000001') + self.assertEqual(tt.nanosecond_time, 1) + tt = Time('00:00:00.000001') + self.assertEqual(tt.nanosecond_time, one_micro) + tt = Time('00:00:00.001') + self.assertEqual(tt.nanosecond_time, one_milli) + tt = Time('00:00:01') + self.assertEqual(tt.nanosecond_time, one_second) + tt = Time('00:01:00') + self.assertEqual(tt.nanosecond_time, one_minute) + tt = Time('01:00:00') + self.assertEqual(tt.nanosecond_time, one_hour) + tt = Time('01:00:00.') + self.assertEqual(tt.nanosecond_time, one_hour) + + tt = Time('23:59:59.123456') + self.assertEqual(tt.nanosecond_time, 23 * one_hour + 59 * one_minute + 59 * one_second + 123 * one_milli + 456 * one_micro) + + tt = Time('23:59:59.1234567') + self.assertEqual(tt.nanosecond_time, 23 * one_hour + 59 * one_minute + 59 * one_second + 123 * one_milli + 456 * one_micro + 700) + + tt = Time('23:59:59.12345678') + self.assertEqual(tt.nanosecond_time, 23 * one_hour + 59 * one_minute + 59 * one_second + 123 * one_milli + 456 * one_micro + 780) + + tt = Time('23:59:59.123456789') + self.assertEqual(tt.nanosecond_time, 23 * one_hour + 59 * one_minute + 59 * one_second + 123 * one_milli + 456 * one_micro + 789) + + def test_micro_precision(self): + Time('23:59:59.1') + Time('23:59:59.12') + Time('23:59:59.123') + Time('23:59:59.1234') + Time('23:59:59.12345') + + def test_from_int(self): + tt = Time(12345678) + self.assertEqual(tt.nanosecond_time, 12345678) + + def test_from_time(self): + expected_time = datetime.time(12, 1, 2, 3) + tt = Time(expected_time) + self.assertEqual(tt, expected_time) + + def test_equals(self): + # util.Time self equality + self.assertEqual(Time(1234), 
Time(1234)) + + def test_str_repr(self): + time_str = '12:13:14.123456789' + self.assertEqual(str(Time(time_str)), time_str) + self.assertEqual(repr(Time(1)), 'Time(1)') + + def test_invalid_init(self): + self.assertRaises(ValueError, Time, '1999-10-10 11:11:11.1234') + self.assertRaises(TypeError, Time, 1.234) + self.assertRaises(ValueError, Time, 123456789000000) + self.assertRaises(TypeError, Time, datetime.datetime(2004, 12, 23, 11, 11, 1)) From 6056b36bc8b92cc106576af44c25ebae0a524eec Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 17 Jul 2015 14:29:25 -0500 Subject: [PATCH 1594/3726] for RoundRobin return iterator instead of materialized list Most of time, only first element will be consumed. --- cassandra/policies.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 328b6b82fb..8132f8ab8e 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -173,7 +173,7 @@ def make_query_plan(self, working_keyspace=None, query=None): length = len(hosts) if length: pos %= length - return list(islice(cycle(hosts), pos, pos + length)) + return islice(cycle(hosts), pos, pos + length) else: return [] From c4abf2031325df100e202f9e71e7d03b09d2080a Mon Sep 17 00:00:00 2001 From: GregBestland Date: Wed, 22 Jul 2015 16:18:54 -0500 Subject: [PATCH 1595/3726] Changing tests to be cross python compatible --- .../standard/test_custom_protocol_handler.py | 4 +++- tests/unit/test_concurrent.py | 10 +++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index 63d4b1991a..61a23831b7 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -22,6 +22,8 @@ from cassandra.cluster import Cluster from tests.integration import use_singledc, PROTOCOL_VERSION, execute_until_pass from 
tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, get_sample +from six import binary_type + import uuid @@ -74,7 +76,7 @@ def test_custom_raw_uuid_row_results(self): result_set = session.execute("SELECT schema_version FROM system.local") result = result_set.pop() raw_value = result.pop() - self.assertEqual(type(raw_value), str) + self.assertTrue(isinstance(raw_value, binary_type)) self.assertEqual(len(raw_value), 16) # Ensure that we get normal uuid back when we re-connect diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index 5e23f584a0..0bdb1f9e4d 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -86,14 +86,14 @@ class TimedCallableInvoker(threading.Thread): def __init__(self, handler, slowdown=False): super(TimedCallableInvoker, self).__init__() self.slowdown = slowdown - self._stop = threading.Event() + self._stopper = threading.Event() self.handler = handler def stop(self): - self._stop.set() + self._stopper.set() def stopped(self): - return self._stop.isSet() + return self._stopper.isSet() def run(self): while(not self.stopped()): @@ -101,11 +101,11 @@ def run(self): pending_callback = self.handler.get_next_callback() priority_num = pending_callback[0] if (priority_num % 10) == 0 and self.slowdown: - self._stop.wait(.1) + self._stopper.wait(.1) callback_args = pending_callback[1] fn, args, kwargs, time_added = callback_args fn(time_added, *args, **kwargs) - self._stop.wait(.001) + self._stopper.wait(.001) return From 8afd853ae984bd5ef0fd2369dbd7d2a7c604e01f Mon Sep 17 00:00:00 2001 From: Mark Florisson Date: Thu, 23 Jul 2015 15:43:57 +0100 Subject: [PATCH 1596/3726] Add typecodes to module with Cython-compatible .pxd file --- cassandra/protocol.py | 34 ++-------------------- cassandra/typecodes.pxd | 28 +++++++++++++++++++ cassandra/typecodes.py | 62 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 93 insertions(+), 31 deletions(-) create mode 100644 
cassandra/typecodes.pxd create mode 100644 cassandra/typecodes.py diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 41439334d9..a6ce22ec08 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -22,6 +22,7 @@ from six.moves import range import io +from cassandra import typecodes from cassandra import (Unavailable, WriteTimeout, ReadTimeout, WriteFailure, ReadFailure, FunctionFailure, AlreadyExists, InvalidRequest, Unauthorized, @@ -35,7 +36,7 @@ DoubleType, FloatType, Int32Type, InetAddressType, IntegerType, ListType, LongType, MapType, SetType, TimeUUIDType, - UTF8Type, UUIDType, UserType, + UTF8Type, VarcharType, UUIDType, UserType, TupleType, lookup_casstype, SimpleDateType, TimeType, ByteType, ShortType) from cassandra.policies import WriteType @@ -531,35 +532,6 @@ def send_body(self, f, protocol_version): RESULT_KIND_PREPARED = 0x0004 RESULT_KIND_SCHEMA_CHANGE = 0x0005 -class CassandraTypeCodes(object): - CUSTOM_TYPE = 0x0000 - AsciiType = 0x0001 - LongType = 0x0002 - BytesType = 0x0003 - BooleanType = 0x0004 - CounterColumnType = 0x0005 - DecimalType = 0x0006 - DoubleType = 0x0007 - FloatType = 0x0008 - Int32Type = 0x0009 - UTF8Type = 0x000A - DateType = 0x000B - UUIDType = 0x000C - UTF8Type = 0x000D - IntegerType = 0x000E - TimeUUIDType = 0x000F - InetAddressType = 0x0010 - SimpleDateType = 0x0011 - TimeType = 0x0012 - ShortType = 0x0013 - ByteType = 0x0014 - ListType = 0x0020 - MapType = 0x0021 - SetType = 0x0022 - UserType = 0x0030 - TupleType = 0x0031 - - class ResultMessage(_MessageType): opcode = 0x08 name = 'RESULT' @@ -569,7 +541,7 @@ class ResultMessage(_MessageType): paging_state = None # Names match type name in module scope. 
Most are imported from cassandra.cqltypes (except CUSTOM_TYPE) - type_codes = _cqltypes_by_code = dict((v, globals()[k]) for k, v in CassandraTypeCodes.__dict__.items() if not k.startswith('_')) + type_codes = _cqltypes_by_code = dict((v, globals()[k]) for k, v in typecodes.__dict__.items() if not k.startswith('_')) _FLAGS_GLOBAL_TABLES_SPEC = 0x0001 _HAS_MORE_PAGES_FLAG = 0x0002 diff --git a/cassandra/typecodes.pxd b/cassandra/typecodes.pxd new file mode 100644 index 0000000000..b040528400 --- /dev/null +++ b/cassandra/typecodes.pxd @@ -0,0 +1,28 @@ +cdef enum: + CUSTOM_TYPE + AsciiType + LongType + BytesType + BooleanType + CounterColumnType + DecimalType + DoubleType + FloatType + Int32Type + UTF8Type + DateType + UUIDType + VarcharType + IntegerType + TimeUUIDType + InetAddressType + SimpleDateType + TimeType + ShortType + ByteType + ListType + MapType + SetType + UserType + TupleType + diff --git a/cassandra/typecodes.py b/cassandra/typecodes.py new file mode 100644 index 0000000000..651c58d765 --- /dev/null +++ b/cassandra/typecodes.py @@ -0,0 +1,62 @@ +""" +Module with constants for Cassandra type codes. + +These constants are useful for + + a) mapping messages to cqltypes (cassandra/cqltypes.py) + b) optimized dispatching for (de)serialization (cassandra/encoding.py) + +Type codes are repeated here from the Cassandra binary protocol specification: + + 0x0000 Custom: the value is a [string], see above. + 0x0001 Ascii + 0x0002 Bigint + 0x0003 Blob + 0x0004 Boolean + 0x0005 Counter + 0x0006 Decimal + 0x0007 Double + 0x0008 Float + 0x0009 Int + 0x000A Text + 0x000B Timestamp + 0x000C Uuid + 0x000D Varchar + 0x000E Varint + 0x000F Timeuuid + 0x0010 Inet + 0x0020 List: the value is an [option], representing the type + of the elements of the list. 
+ 0x0021 Map: the value is two [option], representing the types of the + keys and values of the map + 0x0022 Set: the value is an [option], representing the type + of the elements of the set +""" + +CUSTOM_TYPE = 0x0000 +AsciiType = 0x0001 +LongType = 0x0002 +BytesType = 0x0003 +BooleanType = 0x0004 +CounterColumnType = 0x0005 +DecimalType = 0x0006 +DoubleType = 0x0007 +FloatType = 0x0008 +Int32Type = 0x0009 +UTF8Type = 0x000A +DateType = 0x000B +UUIDType = 0x000C +VarcharType = 0x000D +IntegerType = 0x000E +TimeUUIDType = 0x000F +InetAddressType = 0x0010 +SimpleDateType = 0x0011 +TimeType = 0x0012 +ShortType = 0x0013 +ByteType = 0x0014 +ListType = 0x0020 +MapType = 0x0021 +SetType = 0x0022 +UserType = 0x0030 +TupleType = 0x0031 + From f0b360a9c718b5d7c74604788a6092870242efcb Mon Sep 17 00:00:00 2001 From: Mark Florisson Date: Thu, 23 Jul 2015 15:45:28 +0100 Subject: [PATCH 1597/3726] Cythonize marshalling code --- cassandra/marshal.pxd | 29 ++++++ cassandra/marshal.pyx | 201 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 230 insertions(+) create mode 100644 cassandra/marshal.pxd create mode 100644 cassandra/marshal.pyx diff --git a/cassandra/marshal.pxd b/cassandra/marshal.pxd new file mode 100644 index 0000000000..ef7d9858f3 --- /dev/null +++ b/cassandra/marshal.pxd @@ -0,0 +1,29 @@ +from libc.stdint cimport (int8_t, int16_t, int32_t, int64_t, + uint8_t, uint16_t, uint32_t, uint64_t) + +cpdef bytes int64_pack(int64_t x) +cpdef bytes int32_pack(int32_t x) +cpdef bytes int16_pack(int16_t x) +cpdef bytes int8_pack(int8_t x) + +cpdef int64_t int64_unpack(const char *buf) +cpdef int32_t int32_unpack(const char *buf) +cpdef int16_t int16_unpack(const char *buf) +cpdef int8_t int8_unpack(const char *buf) + +cpdef bytes uint64_pack(uint64_t x) +cpdef bytes uint32_pack(uint32_t x) +cpdef bytes uint16_pack(uint16_t x) +cpdef bytes uint8_pack(uint8_t x) + +cpdef uint64_t uint64_unpack(const char *buf) +cpdef uint32_t uint32_unpack(const char *buf) +cpdef 
uint16_t uint16_unpack(const char *buf) +cpdef uint8_t uint8_unpack(const char *buf) + +cpdef bytes double_pack(double x) +cpdef bytes float_pack(float x) + +cpdef double double_unpack(const char *buf) +cpdef float float_unpack(const char *buf) + diff --git a/cassandra/marshal.pyx b/cassandra/marshal.pyx new file mode 100644 index 0000000000..4803686149 --- /dev/null +++ b/cassandra/marshal.pyx @@ -0,0 +1,201 @@ +# cython: profile=True +# Copyright 2013-2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import sys +import struct +import math + +from libc.stdint cimport (int8_t, int16_t, int32_t, int64_t, + uint8_t, uint16_t, uint32_t, uint64_t) + +assert sys.byteorder in ('little', 'big') +is_little_endian = sys.byteorder == 'little' + +# cdef extern from "marshal.h": +# cdef str c_string_to_python(char *p, Py_ssize_t len) + +def _make_packer(format_string): + packer = struct.Struct(format_string) + pack = packer.pack + unpack = lambda s: packer.unpack(s)[0] + return pack, unpack + + +cdef inline bytes pack(char *buf, Py_ssize_t size): + """ + Pack a buffer, given as a char *, into Python bytes in byte order. + """ + if is_little_endian: + swap_order(buf, size) + return buf[:size] + + +cdef inline swap_order(char *buf, Py_ssize_t size): + """ + Swap the byteorder of `buf` in-place (reverse all the bytes). + There are functions ntohl etc, but these may be POSIX-dependent. 
+ """ + cdef Py_ssize_t start, end + cdef char c + for i in range(size/2): + end = size - i - 1 + c = buf[i] + buf[i] = buf[end] + buf[end] = c + +### Packing and unpacking of signed integers + +cpdef inline bytes int64_pack(int64_t x): + return pack( &x, 8) + +cpdef inline int64_t int64_unpack(const char *buf): + # The 'const' makes sure the buffer is not mutated in-place! + cdef int64_t x = ( buf)[0] + swap_order( &x, 8) + return x + +cpdef inline bytes int32_pack(int32_t x): + return pack( &x, 4) + +cpdef inline int32_t int32_unpack(const char *buf): + cdef int32_t x = ( buf)[0] + swap_order( &x, 4) + return x + +cpdef inline bytes int16_pack(int16_t x): + return pack( &x, 2) + +cpdef inline int16_t int16_unpack(const char *buf): + cdef int16_t x = ( buf)[0] + swap_order( &x, 2) + return x + +cpdef inline bytes int8_pack(int8_t x): + return ( &x)[:1] + +cpdef inline int8_t int8_unpack(const char *buf): + return ( buf)[0] + +cpdef inline bytes uint64_pack(uint64_t x): + return pack( &x, 8) + +cpdef inline uint64_t uint64_unpack(const char *buf): + cdef uint64_t x = ( buf)[0] + swap_order( &x, 8) + return x + +cpdef inline bytes uint32_pack(uint32_t x): + return pack( &x, 4) + +cpdef inline uint32_t uint32_unpack(const char *buf): + cdef uint32_t x = ( buf)[0] + swap_order( &x, 4) + return x + +cpdef inline bytes uint16_pack(uint16_t x): + return pack( &x, 2) + +cpdef inline uint16_t uint16_unpack(const char *buf): + cdef uint16_t x = ( buf)[0] + swap_order( &x, 2) + return x + +cpdef inline bytes uint8_pack(uint8_t x): + return pack( &x, 1) + +cpdef inline uint8_t uint8_unpack(const char *buf): + return ( buf)[0] + +cpdef inline bytes double_pack(double x): + return pack( &x, 8) + +cpdef inline double double_unpack(const char *buf): + cdef double x = ( buf)[0] + swap_order( &x, 8) + return x + +cpdef inline bytes float_pack(float x): + return pack( &x, 4) + +cpdef inline float float_unpack(const char *buf): + cdef float x = ( buf)[0] + swap_order( &x, 4) + return 
x + +# int64_pack, int64_unpack = _make_packer('>q') +# int32_pack, int32_unpack = _make_packer('>i') +# int16_pack, int16_unpack = _make_packer('>h') +# int8_pack, int8_unpack = _make_packer('>b') +# uint64_pack, uint64_unpack = _make_packer('>Q') +# uint32_pack, uint32_unpack = _make_packer('>I') +# uint16_pack, uint16_unpack = _make_packer('>H') +# uint8_pack, uint8_unpack = _make_packer('>B') +# float_pack, float_unpack = _make_packer('>f') +# double_pack, double_unpack = _make_packer('>d') + +# Special case for cassandra header +header_struct = struct.Struct('>BBbB') +header_pack = header_struct.pack +header_unpack = header_struct.unpack + +# in protocol version 3 and higher, the stream ID is two bytes +v3_header_struct = struct.Struct('>BBhB') +v3_header_pack = v3_header_struct.pack +v3_header_unpack = v3_header_struct.unpack + + +if six.PY3: + def varint_unpack(term): + val = int(''.join("%02x" % i for i in term), 16) + if (term[0] & 128) != 0: + # There is a bug in Cython (0.20 - 0.22), where if we do + # '1 << (len(term) * 8)' Cython generates '1' directly into the + # C code, causing integer overflows. 
Treat it as an object for now + val -= ( 1L) << (len(term) * 8) + return val +else: + def varint_unpack(term): # noqa + val = int(term.encode('hex'), 16) + if (ord(term[0]) & 128) != 0: + val = val - (1 << (len(term) * 8)) + return val + + +def bitlength(n): + # return int(math.log2(n)) + 1 + bitlen = 0 + while n > 0: + n >>= 1 + bitlen += 1 + return bitlen + + +def varint_pack(big): + pos = True + if big == 0: + return b'\x00' + if big < 0: + bytelength = bitlength(abs(big) - 1) // 8 + 1 + big = (1 << bytelength * 8) + big + pos = False + revbytes = bytearray() + while big > 0: + revbytes.append(big & 0xff) + big >>= 8 + if pos and revbytes[-1] & 0x80: + revbytes.append(0) + revbytes.reverse() + return six.binary_type(revbytes) From ad7e4e08481b8cd48c5724256c955604713101eb Mon Sep 17 00:00:00 2001 From: Mark Florisson Date: Thu, 23 Jul 2015 15:47:04 +0100 Subject: [PATCH 1598/3726] Start on Cython version of ProtocolHandler --- cassandra/bytesio.pxd | 7 ++ cassandra/bytesio.pyx | 56 ++++++++++ cassandra/cython_protocol_handler.pyx | 154 ++++++++++++++++++++++++++ 3 files changed, 217 insertions(+) create mode 100644 cassandra/bytesio.pxd create mode 100644 cassandra/bytesio.pyx create mode 100644 cassandra/cython_protocol_handler.pyx diff --git a/cassandra/bytesio.pxd b/cassandra/bytesio.pxd new file mode 100644 index 0000000000..349fd600e6 --- /dev/null +++ b/cassandra/bytesio.pxd @@ -0,0 +1,7 @@ +cdef class BytesIOReader: + cdef bytes buf + cdef char *buf_ptr + cdef Py_ssize_t pos + cdef Py_ssize_t size + cdef char *read(self, Py_ssize_t n = ?) + diff --git a/cassandra/bytesio.pyx b/cassandra/bytesio.pyx new file mode 100644 index 0000000000..505fe391a6 --- /dev/null +++ b/cassandra/bytesio.pyx @@ -0,0 +1,56 @@ +# ython profile=True + +cdef class BytesIOReader: + """ + This class provides efficient support for reading bytes from a 'bytes' buffer, + by returning char * values directly without allocating intermediate objects. 
+ """ + + def __init__(self, bytes buf): + self.buf = buf + self.size = len(buf) + self.buf_ptr = self.buf + + cdef char *read(self, Py_ssize_t n = -1): + """Read at most size bytes from the file + (less if the read hits EOF before obtaining size bytes). + + If the size argument is negative or omitted, read all data until EOF + is reached. The bytes are returned as a string object. An empty + string is returned when EOF is encountered immediately. + """ + cdef Py_ssize_t newpos = self.pos + n + cdef char *res + + if n < 0: + newpos = self.size + elif newpos > self.size: + self.pos = self.size + return b'' + else: + res = self.buf_ptr + self.pos + self.pos = newpos + return res + + +class PyBytesIOReader(BytesIOReader): + """ + Python-compatible BytesIOReader class + """ + + def read(self, n = -1): + """Read at most size bytes from the file + (less if the read hits EOF before obtaining size bytes). + + If the size argument is negative or omitted, read all data until EOF + is reached. The bytes are returned as a string object. An empty + string is returned when EOF is encountered immediately. 
+ """ + if n is None or n < 0: + newpos = self.len + else: + newpos = min(self.pos+n, self.len) + r = self.buf[self.pos:newpos] + self.pos = newpos + return r + diff --git a/cassandra/cython_protocol_handler.pyx b/cassandra/cython_protocol_handler.pyx new file mode 100644 index 0000000000..85a5945a99 --- /dev/null +++ b/cassandra/cython_protocol_handler.pyx @@ -0,0 +1,154 @@ +# ython: profile=True + +from libc.stdint cimport int64_t, int32_t + +# from cassandra.marshal cimport (int8_pack, int8_unpack, int16_pack, int16_unpack, +# uint16_pack, uint16_unpack, uint32_pack, uint32_unpack, +# int32_pack, int32_unpack, int64_pack, int64_unpack, float_pack, float_unpack, double_pack, double_unpack) + +from cassandra.marshal import varint_pack, varint_unpack +from cassandra import util +from cassandra.cqltypes import EMPTY +from cassandra.protocol import ResultMessage, ProtocolHandler + +from cassandra.bytesio cimport BytesIOReader +from cassandra cimport typecodes + +import numpy as np + +include "marshal.pyx" + +class FastResultMessage(ResultMessage): + """ + Cython version of Result Message that has a faster implementation of + recv_results_row. 
+ """ + # type_codes = ResultMessage.type_codes.copy() + code_to_type = dict((v, k) for k, v in ResultMessage.type_codes.items()) + + @classmethod + def recv_results_rows(cls, f, protocol_version, user_type_map): + paging_state, column_metadata = cls.recv_results_metadata(f, user_type_map) + + colnames = [c[2] for c in column_metadata] + coltypes = [c[3] for c in column_metadata] + colcodes = np.array( + [cls.code_to_type.get(coltype, -1) for coltype in coltypes], + dtype=np.dtype('i')) + parsed_rows = parse_rows(BytesIOReader(f.read()), colnames, + coltypes, colcodes, protocol_version) + return (paging_state, (colnames, parsed_rows)) + + +cdef parse_rows(BytesIOReader reader, list colnames, list coltypes, + int[::1] colcodes, protocol_version): + cdef Py_ssize_t i, rowcount + cdef char *raw_val + cdef int32_t raw_val_size + rowcount = read_int(reader) + # return RowIterator(reader, coltypes, colcodes, protocol_version, rowcount) + return [parse_row(reader, coltypes, colcodes, protocol_version) + for i in range(rowcount)] + + +cdef class RowIterator: + """ + Result iterator for a set of rows + + There seems to be an issue with generator expressions + memoryviews, so we + have a special iterator class instead. 
+ """ + cdef list coltypes + cdef int[::1] colcodes + cdef Py_ssize_t rowcount, pos + cdef BytesIOReader reader + cdef object protocol_version + + def __init__(self, reader, coltypes, colcodes, protocol_version, rowcount): + self.reader = reader + self.coltypes = coltypes + self.colcodes = colcodes + self.protocol_version = protocol_version + self.rowcount = rowcount + self.pos = 0 + + def __iter__(self): + return self + + def __next__(self): + if self.pos >= self.rowcount: + raise StopIteration + self.pos += 1 + return parse_row(self.reader, self.coltypes, self.colcodes, self.protocol_version) + + next = __next__ + + +cdef inline parse_row(BytesIOReader reader, list coltypes, int[::1] colcodes, + protocol_version): + cdef Py_ssize_t j + + row = [] + for j, ctype in enumerate(coltypes): + raw_val_size = read_int(reader) + if raw_val_size < 0: + val = None + else: + raw_val = reader.read(raw_val_size) + val = from_binary(ctype, colcodes[j], raw_val, + raw_val_size, protocol_version) + row.append(val) + + return row + + +class CythonProtocolHandler(ProtocolHandler): + """ + Use FastResultMessage to decode query result message messages. + """ + my_opcodes = ProtocolHandler.message_types_by_opcode.copy() + my_opcodes[FastResultMessage.opcode] = FastResultMessage + message_types_by_opcode = my_opcodes + + +cdef inline int32_t read_int(BytesIOReader reader): + return int32_unpack(reader.read(4)) + + +cdef inline from_binary(cqltype, int typecode, char *byts, int32_t size, protocol_version): + """ + Deserialize a bytestring into a value. See the deserialize() method + for more information. This method differs in that if None or the empty + string is passed in, None may be returned. + + This method provides a fast-path deserialization routine. 
+ """ + if size == 0 and cqltype.empty_binary_ok: + return empty(cqltype) + return deserialize(cqltype, typecode, byts, size, protocol_version) + + +cdef empty(cqltype): + return EMPTY if cqltype.support_empty_values else None + + +def to_binary(cqltype, val, protocol_version): + """ + Serialize a value into a bytestring. See the serialize() method for + more information. This method differs in that if None is passed in, + the result is the empty string. + """ + return b'' if val is None else cqltype.serialize(val, protocol_version) + + +cdef deserialize(cqltype, int typecode, char *byts, int32_t size, protocol_version): + if typecode == typecodes.LongType: + return int64_unpack(byts) + else: + return deserialize_generic(cqltype, typecode, byts, size, protocol_version) + +cdef deserialize_generic(cqltype, int typecode, char *byts, int32_t size, + protocol_version): + print("deserialize", cqltype) + return cqltype.deserialize(byts[:size], protocol_version) + From 39af4e15698081348dc97acdcaa4ffcd284f6ae2 Mon Sep 17 00:00:00 2001 From: Mark Florisson Date: Thu, 23 Jul 2015 16:03:23 +0100 Subject: [PATCH 1599/3726] Add Cython modules to setup.py --- cassandra/bytesio.pyx | 2 +- cassandra/cython_protocol_handler.pyx | 2 +- cassandra/marshal.pyx | 2 +- setup.py | 5 +++-- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/cassandra/bytesio.pyx b/cassandra/bytesio.pyx index 505fe391a6..82887f4383 100644 --- a/cassandra/bytesio.pyx +++ b/cassandra/bytesio.pyx @@ -1,4 +1,4 @@ -# ython profile=True +# -- cython profile=True cdef class BytesIOReader: """ diff --git a/cassandra/cython_protocol_handler.pyx b/cassandra/cython_protocol_handler.pyx index 85a5945a99..add1e9f5ad 100644 --- a/cassandra/cython_protocol_handler.pyx +++ b/cassandra/cython_protocol_handler.pyx @@ -1,4 +1,4 @@ -# ython: profile=True +# -- cython: profile=True from libc.stdint cimport int64_t, int32_t diff --git a/cassandra/marshal.pyx b/cassandra/marshal.pyx index 4803686149..0efbf705bd 100644 
--- a/cassandra/marshal.pyx +++ b/cassandra/marshal.pyx @@ -1,4 +1,4 @@ -# cython: profile=True +# -- cython: profile=True # Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/setup.py b/setup.py index 37899c2e01..7083d7aaac 100644 --- a/setup.py +++ b/setup.py @@ -37,7 +37,6 @@ DistutilsExecError) from distutils.cmd import Command - try: import subprocess has_subprocess = True @@ -262,11 +261,13 @@ def run_setup(extensions): if "--no-cython" not in sys.argv: try: from Cython.Build import cythonize - cython_candidates = ['cluster', 'concurrent', 'connection', 'cqltypes', 'marshal', 'metadata', 'pool', 'protocol', 'query', 'util'] + cython_candidates = ['cluster', 'concurrent', 'connection', 'cqltypes', 'metadata', 'pool', 'protocol', 'query', 'util'] compile_args = [] if is_windows else ['-Wno-unused-function'] extensions.extend(cythonize( [Extension('cassandra.%s' % m, ['cassandra/%s.py' % m], extra_compile_args=compile_args) for m in cython_candidates], exclude_failures=True)) + + extensions.extend(cythonize("cassandra/*.pyx")) except ImportError: sys.stderr.write("Cython is not installed. Not compiling core driver files as extensions (optional).") From 92457198cca1a3832455fbefcc81e3fed351b33d Mon Sep 17 00:00:00 2001 From: Mark Florisson Date: Thu, 23 Jul 2015 16:18:19 +0100 Subject: [PATCH 1600/3726] Use return type void for swap_order --- cassandra/marshal.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/marshal.pyx b/cassandra/marshal.pyx index 0efbf705bd..8ffe3e46dd 100644 --- a/cassandra/marshal.pyx +++ b/cassandra/marshal.pyx @@ -43,7 +43,7 @@ cdef inline bytes pack(char *buf, Py_ssize_t size): return buf[:size] -cdef inline swap_order(char *buf, Py_ssize_t size): +cdef inline void swap_order(char *buf, Py_ssize_t size): """ Swap the byteorder of `buf` in-place (reverse all the bytes). There are functions ntohl etc, but these may be POSIX-dependent. 
From fe67aec185f63576e093d82a0491789a64301467 Mon Sep 17 00:00:00 2001 From: Mark Florisson Date: Thu, 23 Jul 2015 16:27:17 +0100 Subject: [PATCH 1601/3726] Make sure swap_order uses no PyObjects --- cassandra/marshal.pyx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/marshal.pyx b/cassandra/marshal.pyx index 8ffe3e46dd..529f45e75a 100644 --- a/cassandra/marshal.pyx +++ b/cassandra/marshal.pyx @@ -48,9 +48,9 @@ cdef inline void swap_order(char *buf, Py_ssize_t size): Swap the byteorder of `buf` in-place (reverse all the bytes). There are functions ntohl etc, but these may be POSIX-dependent. """ - cdef Py_ssize_t start, end + cdef Py_ssize_t start, end, i cdef char c - for i in range(size/2): + for i in range(size//2): end = size - i - 1 c = buf[i] buf[i] = buf[end] From 2b7997830a3073a2e73942d36f2e3b22f7443a6c Mon Sep 17 00:00:00 2001 From: Mark Florisson Date: Thu, 23 Jul 2015 16:31:44 +0100 Subject: [PATCH 1602/3726] Check endianness before byte-swapping --- cassandra/marshal.pyx | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/cassandra/marshal.pyx b/cassandra/marshal.pyx index 529f45e75a..2ecb0fa590 100644 --- a/cassandra/marshal.pyx +++ b/cassandra/marshal.pyx @@ -22,7 +22,7 @@ from libc.stdint cimport (int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t) assert sys.byteorder in ('little', 'big') -is_little_endian = sys.byteorder == 'little' +cdef bint is_little_endian = sys.byteorder == 'little' # cdef extern from "marshal.h": # cdef str c_string_to_python(char *p, Py_ssize_t len) @@ -38,23 +38,25 @@ cdef inline bytes pack(char *buf, Py_ssize_t size): """ Pack a buffer, given as a char *, into Python bytes in byte order. """ - if is_little_endian: - swap_order(buf, size) + swap_order(buf, size) return buf[:size] cdef inline void swap_order(char *buf, Py_ssize_t size): """ - Swap the byteorder of `buf` in-place (reverse all the bytes). 
+ Swap the byteorder of `buf` in-place on little-endian platforms + (reverse all the bytes). There are functions ntohl etc, but these may be POSIX-dependent. """ cdef Py_ssize_t start, end, i cdef char c - for i in range(size//2): - end = size - i - 1 - c = buf[i] - buf[i] = buf[end] - buf[end] = c + + if is_little_endian: + for i in range(size//2): + end = size - i - 1 + c = buf[i] + buf[i] = buf[end] + buf[end] = c ### Packing and unpacking of signed integers From d3f7c17674f8263860c1b7acffb53629ac01c77e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 20 Jul 2015 11:51:10 -0500 Subject: [PATCH 1603/3726] izip values in BoundStatement.bind small optimization --- cassandra/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/query.py b/cassandra/query.py index 21b668dfb3..0ba6c5947d 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -24,7 +24,7 @@ import struct import time import six -from six.moves import range +from six.moves import range, zip from cassandra import ConsistencyLevel, OperationTimedOut from cassandra.util import unix_time_from_uuid1 From c87159b6be399732e5614afcac4379cee8b8cf67 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 20 Jul 2015 11:51:38 -0500 Subject: [PATCH 1604/3726] Optimize Connection._read_frame_header no seek, read in-place --- cassandra/connection.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 5db88985e3..59684e712b 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -39,7 +39,7 @@ from six.moves import range from cassandra import ConsistencyLevel, AuthenticationFailed, OperationTimedOut -from cassandra.marshal import int32_pack, uint8_unpack +from cassandra.marshal import int32_pack from cassandra.protocol import (ReadyMessage, AuthenticateMessage, OptionsMessage, StartupMessage, ErrorMessage, CredentialsMessage, QueryMessage, ResultMessage, 
ProtocolHandler, @@ -99,7 +99,16 @@ def decompress(byts): frame_header_v1_v2 = struct.Struct('>BbBi') frame_header_v3 = struct.Struct('>BhBi') -_Frame = namedtuple('Frame', ('version', 'flags', 'stream', 'opcode', 'body_offset', 'end_pos')) + +class _Frame(object): + def __init__(self, version, flags, stream, opcode, body_offset, end_pos): + self.version = version + self.flags = flags + self.stream = stream + self.opcode = opcode + self.body_offset = body_offset + self.end_pos = end_pos + NONBLOCKING = (errno.EAGAIN, errno.EWOULDBLOCK) @@ -442,24 +451,20 @@ def control_conn_disposed(self): @defunct_on_error def _read_frame_header(self): - buf = self._iobuf - pos = buf.tell() + buf = self._iobuf.getvalue() + pos = len(buf) if pos: - buf.seek(0) - version = uint8_unpack(buf.read(1)) & PROTOCOL_VERSION_MASK + version = ord(buf[0]) & PROTOCOL_VERSION_MASK if version > MAX_SUPPORTED_VERSION: raise ProtocolError("This version of the driver does not support protocol version %d" % version) frame_header = frame_header_v3 if version >= 3 else frame_header_v1_v2 # this frame header struct is everything after the version byte header_size = frame_header.size + 1 if pos >= header_size: - flags, stream, op, body_len = frame_header.unpack(buf.read(frame_header.size)) + flags, stream, op, body_len = frame_header.unpack_from(buf, 1) if body_len < 0: raise ProtocolError("Received negative body length: %r" % body_len) self._current_frame = _Frame(version, flags, stream, op, header_size, body_len + header_size) - - self._iobuf.seek(pos) - return pos def _reset_frame(self): From fad1535b5d37e4c5fadc0812c172fc81d749548d Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Mon, 20 Jul 2015 12:39:00 -0500 Subject: [PATCH 1605/3726] construct new IO buffer with remainder of previous --- cassandra/connection.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 59684e712b..8b52185d11 100644 --- a/cassandra/connection.py 
+++ b/cassandra/connection.py @@ -468,9 +468,7 @@ def _read_frame_header(self): return pos def _reset_frame(self): - leftover = self._iobuf.read() - self._iobuf = io.BytesIO() - self._iobuf.write(leftover) + self._iobuf = io.BytesIO(self._iobuf.read()) self._current_frame = None def process_io_buffer(self): From 5e69140c990f38371ae78da6147deb65e17e2bef Mon Sep 17 00:00:00 2001 From: GregBestland Date: Thu, 23 Jul 2015 19:18:02 -0500 Subject: [PATCH 1606/3726] Adding integration and unit tests for PYTHON-358 --- .../standard/test_control_connection.py | 74 +++++++++++++++++++ tests/unit/test_control_connection.py | 38 ++++++++++ 2 files changed, 112 insertions(+) create mode 100644 tests/integration/standard/test_control_connection.py diff --git a/tests/integration/standard/test_control_connection.py b/tests/integration/standard/test_control_connection.py new file mode 100644 index 0000000000..af8ad6e0db --- /dev/null +++ b/tests/integration/standard/test_control_connection.py @@ -0,0 +1,74 @@ +# Copyright 2013-2015 DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License +# +# +# + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + + +from cassandra.cluster import Cluster +from cassandra.protocol import ConfigurationException +from tests.integration import use_singledc, PROTOCOL_VERSION +from tests.integration.datatype_utils import update_datatypes + + +def setup_module(): + use_singledc() + update_datatypes() + + +class ControlConnectionTests(unittest.TestCase): + def setUp(self): + self.cluster = Cluster(protocol_version=PROTOCOL_VERSION) + self.session = self.cluster.connect() + + def tearDown(self): + try: + self.session.execute("DROP KEYSPACE keyspacetodrop ") + except (ConfigurationException): + # we already removed the keyspace. + pass + self.cluster.shutdown() + + def test_drop_keyspace(self): + """ + Test to validate that dropping a keyspace with user defined types doesn't kill the control connection. + + + Creates a keyspace, and populates with a user defined type. It then records the control_connection's id. It + will then drop the keyspace and get the id of the control_connection again. They should be the same. If they are + not dropping the keyspace likely caused the control connection to be rebuilt. 
+ + @since 2.7.0 + @jira_ticket PYTHON-358 + @expected_result the control connection is not killed + + @test_category connection + """ + + self.session.execute(""" + CREATE KEYSPACE keyspacetodrop + WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' } + """) + self.session.set_keyspace("keyspacetodrop") + self.session.execute("CREATE TYPE user (age int, name text)") + self.session.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen)") + cc_id_pre_drop = id(self.cluster.control_connection._connection) + self.session.execute("DROP KEYSPACE keyspacetodrop") + cc_id_post_drop = id(self.cluster.control_connection._connection) + self.assertEqual(cc_id_post_drop, cc_id_pre_drop) \ No newline at end of file diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index 1429531c2d..f1fa9cb6c0 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -464,3 +464,41 @@ def test_refresh_disabled(self): cluster.scheduler.schedule_unique.assert_has_calls([call(ANY, cc_no_topo_refresh.refresh_node_list_and_token_map), call(0.0, cc_no_topo_refresh.refresh_schema, schema_event['keyspace'], schema_event['table'], None, None, None)]) + + +class EventTimingTest(unittest.TestCase): + """ + A simple test to validate that event scheduling happens in order + Added for PYTHON-358 + """ + def setUp(self): + self.cluster = MockCluster() + self.connection = MockConnection() + self.time = FakeTime() + + # Use 2 for the schema_event_refresh_window which is what we would normally default to. 
+ self.control_connection = ControlConnection(self.cluster, 1, 2, 0) + self.control_connection._connection = self.connection + self.control_connection._time = self.time + + def test_event_delay_timing(self): + """ + Submits a wide array of events make sure that each is scheduled to occur in the order they were received + """ + prior_delay = 0 + for _ in range(100): + for change_type in ('CREATED', 'DROPPED', 'UPDATED'): + event = { + 'change_type': change_type, + 'keyspace': '1', + 'table': 'table1' + } + # This is to increment the fake time, we don't actually sleep here. + self.time.sleep(.001) + self.cluster.scheduler.reset_mock() + self.control_connection._handle_schema_change(event) + self.cluster.scheduler.mock_calls + # Grabs the delay parameter from the scheduler invocation + current_delay = self.cluster.scheduler.mock_calls[0][1][0] + self.assertLess(prior_delay, current_delay) + prior_delay = current_delay From 5a22bf816267b1e9713f75eabf0c666418de3be6 Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 24 Jul 2015 11:46:57 -0500 Subject: [PATCH 1607/3726] connection._Frame equality function to fix unit test --- cassandra/connection.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 8b52185d11..e2fed44901 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -13,12 +13,14 @@ # limitations under the License. 
from __future__ import absolute_import # to enable import io from stdlib -from collections import defaultdict, deque, namedtuple +from collections import defaultdict, deque import errno from functools import wraps, partial from heapq import heappush, heappop import io import logging +import six +from six.moves import range import socket import struct import sys @@ -35,9 +37,6 @@ else: from six.moves.queue import Queue, Empty # noqa -import six -from six.moves import range - from cassandra import ConsistencyLevel, AuthenticationFailed, OperationTimedOut from cassandra.marshal import int32_pack from cassandra.protocol import (ReadyMessage, AuthenticateMessage, OptionsMessage, @@ -109,6 +108,16 @@ def __init__(self, version, flags, stream, opcode, body_offset, end_pos): self.body_offset = body_offset self.end_pos = end_pos + def __eq__(self, other): # facilitates testing + if isinstance(other, _Frame): + return (self.version == other.version and + self.flags == other.flags and + self.stream == other.stream and + self.opcode == other.opcode and + self.body_offset == other.body_offset and + self.end_pos == other.end_pos) + return NotImplemented + NONBLOCKING = (errno.EAGAIN, errno.EWOULDBLOCK) @@ -130,6 +139,7 @@ class ConnectionShutdown(ConnectionException): """ pass + class ProtocolVersionUnsupported(ConnectionException): """ Server rejected startup message due to unsupported protocol version @@ -139,6 +149,7 @@ def __init__(self, host, startup_version): (host, startup_version)) self.startup_version = startup_version + class ConnectionBusy(Exception): """ An attempt was made to send a message through a :class:`.Connection` that @@ -248,7 +259,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self.connected_event = Event() @classmethod - def initialize_reactor(self): + def initialize_reactor(cls): """ Called once by Cluster.connect(). This should be used by implementations to set up any resources that will be shared across connections. 
@@ -256,7 +267,7 @@ def initialize_reactor(self): pass @classmethod - def handle_fork(self): + def handle_fork(cls): """ Called after a forking. This should cleanup any remaining reactor state from the parent process. From 4764b341d5a269e6183816e8f6823a437f38212f Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 24 Jul 2015 12:50:46 -0500 Subject: [PATCH 1608/3726] Fix Connection._read_fram_header update for Python3 --- cassandra/connection.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index e2fed44901..5112248b82 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -178,6 +178,11 @@ def wrapper(self, *args, **kwargs): DEFAULT_CQL_VERSION = '3.0.0' +if six.PY3: + def int_from_buf_item(i): + return i +else: + int_from_buf_item = ord class Connection(object): @@ -465,7 +470,7 @@ def _read_frame_header(self): buf = self._iobuf.getvalue() pos = len(buf) if pos: - version = ord(buf[0]) & PROTOCOL_VERSION_MASK + version = int_from_buf_item(buf[0]) & PROTOCOL_VERSION_MASK if version > MAX_SUPPORTED_VERSION: raise ProtocolError("This version of the driver does not support protocol version %d" % version) frame_header = frame_header_v3 if version >= 3 else frame_header_v1_v2 From 4a79c748319f6dc477f7bccf7cb4da42ce1f6d3e Mon Sep 17 00:00:00 2001 From: Adam Holmberg Date: Fri, 24 Jul 2015 14:10:38 -0500 Subject: [PATCH 1609/3726] cython as optional extra for setup --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 37899c2e01..67594036e4 100644 --- a/setup.py +++ b/setup.py @@ -216,6 +216,7 @@ def run_setup(extensions): keywords='cassandra,cql,orm', include_package_data=True, install_requires=dependencies, + extras_require = {'cython': ['Cython']}, tests_require=['nose', 'mock<=1.0.1', 'PyYAML', 'pytz', 'sure'], classifiers=[ 'Development Status :: 5 - Production/Stable', From f2e9bb36b96947739c29f9392daa69292f563df7 Mon Sep 17 00:00:00 
2001 From: Adam Holmberg Date: Fri, 24 Jul 2015 16:26:46 -0500 Subject: [PATCH 1610/3726] Upate performance notes doc: +cython, -numbers remove outdated, specific numbers Having numbers for contrived workloads sometimes confuses people or leads to impressions of false claims. Unified benchmarks at DataStax will replace these. --- docs/performance.rst | 299 ++++--------------------------------------- 1 file changed, 28 insertions(+), 271 deletions(-) diff --git a/docs/performance.rst b/docs/performance.rst index c7a6b3b290..cf11e73713 100644 --- a/docs/performance.rst +++ b/docs/performance.rst @@ -2,290 +2,47 @@ Performance Notes ================= The Python driver for Cassandra offers several methods for executing queries. You can synchronously block for queries to complete using -:meth:`.Session.execute()`, you can use a future-like interface through -:meth:`.Session.execute_async()`, or you can attach a callback to the future -with :meth:`.ResponseFuture.add_callback()`. Each of these methods has -different performance characteristics and behaves differently when -multiple threads are used. +:meth:`.Session.execute()`, you can obtain asynchronous request futures through +:meth:`.Session.execute_async()`, and you can attach a callback to the future +with :meth:`.ResponseFuture.add_callback()`. -Benchmark Notes ---------------- -All benchmarks were executed using the -`benchmark scripts `_ -in the driver repository. They were executed on a laptop with 16 GiB of RAM, an SSD, -and a 2 GHz, four core CPU with hyper-threading. The Cassandra cluster was a three -node `ccm `_ cluster running on the same laptop -with version 1.2.13 of Cassandra. I suggest testing these benchmarks against your -own cluster when tuning the driver for optimal throughput or latency. - -The 1.0.0 version of the driver was used with all default settings. For these -benchmarks, the driver was configured to use the ``libev`` reactor. 
You can also run -the benchmarks using the ``asyncore`` event loop (:class:`~.AsyncoreConnection`) -by using the ``--asyncore-only`` command line option. - -Each benchmark completes 100,000 small inserts. The replication factor for the -keyspace was three, so all nodes were replicas for the inserted rows. - -The benchmarks require the Python driver C extensions as well as a few additional -Python packages. Follow these steps to install the prerequisites: - -1. Install packages to support Python driver C extensions: - - * Debian/Ubuntu: ``sudo apt-get install gcc python-dev libev4 libev-dev`` - * RHEL/CentOS/Fedora: ``sudo yum install gcc python-dev libev4 libev-dev`` - -2. Install Python packages: ``pip install scales twisted blist`` -3. Re-install the Cassandra driver: ``pip install --upgrade cassandra-driver`` - -Synchronous Execution (`sync.py `_) -------------------------------------------------------------------------------------------------------------- -Although this is the simplest way to make queries, it has low throughput -in single threaded environments. This is basically what the benchmark -is doing: - -.. code-block:: python - - from cassandra.cluster import Cluster - - cluster = Cluster([127.0.0.1, 127.0.0.2, 127.0.0.3]) - session = cluster.connect() - - for i in range(100000): - session.execute("INSERT INTO mykeyspace.mytable (key, b, c) VALUES (a, 'b', 'c')") - -.. code-block:: bash - - ~/python-driver $ python benchmarks/sync.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=1 - Average throughput: 434.08/sec - - -This technique does scale reasonably well as we add more threads: - -.. 
code-block:: bash - - ~/python-driver $ python benchmarks/sync.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=2 - Average throughput: 830.49/sec - ~/python-driver $ python benchmarks/sync.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=4 - Average throughput: 1078.27/sec - ~/python-driver $ python benchmarks/sync.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=8 - Average throughput: 1275.20/sec - ~/python-driver $ python benchmarks/sync.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=16 - Average throughput: 1345.56/sec - - -In my environment, throughput is maximized at about 20 threads. - - -Batched Futures (`future_batches.py `_) ---------------------------------------------------------------------------------------------------------------------------- -This is a simple way to work with futures for higher throughput. Essentially, -we start 120 queries asynchronously at the same time and then wait for them -all to complete. We then repeat this process until all 100,000 operations -have completed: - -.. code-block:: python - - futures = Queue.Queue(maxsize=121) - for i in range(100000): - if i % 120 == 0: - # clear the existing queue - while True: - try: - futures.get_nowait().result() - except Queue.Empty: - break - - future = session.execute_async(query) - futures.put_nowait(future) - -As expected, this improves throughput in a single-threaded environment: - -.. code-block:: bash - - ~/python-driver $ python benchmarks/future_batches.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=1 - Average throughput: 3477.56/sec - -However, adding more threads may actually harm throughput: - -.. 
code-block:: bash - - ~/python-driver $ python benchmarks/future_batches.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=2 - Average throughput: 2360.52/sec - ~/python-driver $ python benchmarks/future_batches.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=4 - Average throughput: 2293.21/sec - ~/python-driver $ python benchmarks/future_batches.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=8 - Average throughput: 2244.85/sec - - -Queued Futures (`future_full_pipeline.py `_) --------------------------------------------------------------------------------------------------------------------------------------- -This pattern is similar to batched futures. The main difference is that -every time we put a future on the queue, we pull the oldest future out -and wait for it to complete: - -.. code-block:: python - - futures = Queue.Queue(maxsize=121) - for i in range(100000): - if i >= 120: - old_future = futures.get_nowait() - old_future.result() - - future = session.execute_async(query) - futures.put_nowait(future) - -This gets slightly better throughput than the Batched Futures pattern: - -.. code-block:: bash +Examples of multiple request patterns can be found in the benchmark scripts included in the driver project. - ~/python-driver $ python benchmarks/future_full_pipeline.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=1 - Average throughput: 3635.76/sec +The choice of execution pattern will depend on the application context. For applications dealing with multiple +requests in a given context, the recommended pattern is to use concurrent asynchronous +requests with callbacks. For many use cases, you don't need to implement this pattern yourself. +:meth:`cassandra.concurrent.execute_concurrent` and :meth:`cassandra.concurrent.execute_concurrent_with_args` +provide this pattern with a synchronous API and tunable concurrency. 
-But this has the same throughput issues when multiple threads are used: - -.. code-block:: bash - - ~/python-driver $ python benchmarks/future_full_pipeline.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=2 - Average throughput: 2213.62/sec - ~/python-driver $ python benchmarks/future_full_pipeline.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=4 - Average throughput: 2707.62/sec - ~/python-driver $ python benchmarks/future_full_pipeline.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=8 - Average throughput: 2462.42/sec - -Unthrottled Futures (`future_full_throttle.py `_) -------------------------------------------------------------------------------------------------------------------------------------------- -What happens if we don't throttle our async requests at all? - -.. code-block:: python - - futures = [] - for i in range(100000): - future = session.execute_async(query) - futures.append(future) - - for future in futures: - future.result() - -Throughput is about the same as the previous pattern, but a lot of memory will -be consumed by the list of Futures: - -.. 
code-block:: bash - - ~/python-driver $ python benchmarks/future_full_throttle.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=1 - Average throughput: 3474.11/sec - ~/python-driver $ python benchmarks/future_full_throttle.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=2 - Average throughput: 2389.61/sec - ~/python-driver $ python benchmarks/future_full_throttle.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=4 - Average throughput: 2371.75/sec - ~/python-driver $ python benchmarks/future_full_throttle.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=8 - Average throughput: 2165.29/sec - -Callback Chaining (`callback_full_pipeline.py `_) ------------------------------------------------------------------------------------------------------------------------------------------------ -This pattern is very different from the previous patterns. Here we're taking -advantage of the :meth:`.ResponseFuture.add_callback()` function to start -another request as soon as one finishes. Furthermore, we're starting 120 -of these callback chains, so we've always got about 120 operations in -flight at any time: - -.. 
code-block:: python - - from itertools import count - from threading import Event - - sentinel = object() - num_queries = 100000 - num_started = count() - num_finished = count() - finished_event = Event() - - def insert_next(previous_result=sentinel): - if previous_result is not sentinel: - if isinstance(previous_result, BaseException): - log.error("Error on insert: %r", previous_result) - if num_finished.next() >= num_queries: - finished_event.set() - - if num_started.next() <= num_queries: - future = session.execute_async(query) - # NOTE: this callback also handles errors - future.add_callbacks(insert_next, insert_next) - - for i in range(min(120, num_queries)): - insert_next() - - finished_event.wait() - -This is a more complex pattern, but the throughput is excellent: - -.. code-block:: bash - - ~/python-driver $ python benchmarks/callback_full_pipeline.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=1 - Average throughput: 7647.30/sec - -Part of the reason why performance is so good is that everything is running on -single thread: the internal event loop thread that powers the driver. The -downside to this is that adding more threads doesn't improve anything: - -.. code-block:: bash - - ~/python-driver $ python benchmarks/callback_full_pipeline.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=2 - Average throughput: 7704.58/sec - - -What happens if we have more than 120 callback chains running? - -With 250 chains: - -.. code-block:: bash - - ~/python-driver $ python benchmarks/callback_full_pipeline.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=1 - Average throughput: 7794.22/sec - -Things look pretty good with 250 chains. If we try 500 chains, we start to max out -all of the connections in the connection pools. 
The problem is that the current -version of the driver isn't very good at throttling these callback chains, so -a lot of time gets spent waiting for new connections and performance drops -dramatically: - -.. code-block:: bash - - ~/python-driver $ python benchmarks/callback_full_pipeline.py -n 100000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --libev-only --threads=1 - Average throughput: 679.61/sec - -When :attr:`.Cluster.protocol_version` is set to 1 or 2, you should limit the -number of callback chains you run to roughly 100 per node in the cluster. -When :attr:`~.Cluster.protocol_version` is 3 or higher, you can safely experiment -with higher numbers of callback chains. - -For many use cases, you don't need to implement this pattern yourself. You can -simply use :meth:`cassandra.concurrent.execute_concurrent` and -:meth:`cassandra.concurrent.execute_concurrent_with_args`, which implement -this pattern for you with a synchronous API. +Due to the GIL and limited concurrency, the driver can become CPU-bound pretty quickly. The sections below +discuss further runtime and design considerations for mitigating this limitation. PyPy ---- -Almost all of these patterns become CPU-bound pretty quickly with CPython, the -normal implementation of python. `PyPy `_ is an alternative -implementation of Python (written in Python) which uses a JIT compiler to -reduce CPU consumption. This leads to a huge improvement in the driver -performance: +`PyPy `_ is an alternative Python runtime which uses a JIT compiler to +reduce CPU consumption. This leads to a huge improvement in the driver performance, +more than doubling throughput for many workloads. -.. code-block:: bash +Cython Extensions +----------------- +`Cython `_ is an optimizing compiler and language that can be used to compile the core files and +optional extensions for the driver. Cython is not a strict dependency, but the extensions will be built by default +if cython is present in the python path. 
To include Cython as a requirement, invoke with the extra name ``cython``: - ~/python-driver $ pypy benchmarks/callback_full_pipeline.py -n 500000 --hosts=127.0.0.1,127.0.0.2,127.0.0.3 --asyncore-only --threads=1 - Average throughput: 18782.00/sec +.. code-block:: bash -Eventually the driver may add C extensions to reduce CPU consumption, which -would probably narrow the gap between the performance of CPython and PyPy. + $ pip install cassandra-driver[cython] multiprocessing --------------- -All of the patterns here may be used over multiple processes using the +All of the patterns discussed above may be used over multiple processes using the `multiprocessing `_ -module. Multiple processes will scale significantly better than multiple -threads will, so if high throughput is your goal, consider this option. +module. Multiple processes will scale better than multiple threads, so if high throughput is your goal, +consider this option. -Just be sure to **never share any** :class:`~.Cluster`, :class:`~.Session`, +Be sure to **never share any** :class:`~.Cluster`, :class:`~.Session`, **or** :class:`~.ResponseFuture` **objects across multiple processes**. These objects should all be created after forking the process, not before. + +For further discussion and simple examples using the driver with ``multiprocessing``, +see `this blog post `_. 
From 6a00db265bd25fe4e87d8b2164164b836bd7b7e2 Mon Sep 17 00:00:00 2001 From: Ash Hoover Date: Fri, 24 Jul 2015 15:45:45 -0700 Subject: [PATCH 1611/3726] Fix a typo in cqlengine query docs --- docs/api/cassandra/cqlengine/query.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/api/cassandra/cqlengine/query.rst b/docs/api/cassandra/cqlengine/query.rst index 8486011a98..df823704f3 100644 --- a/docs/api/cassandra/cqlengine/query.rst +++ b/docs/api/cassandra/cqlengine/query.rst @@ -6,7 +6,7 @@ QuerySet -------- QuerySet objects are typically obtained by calling :meth:`~.cassandra.cqlengine.models.Model.objects` on a model class. -The mehtods here are used to filter, order, and constrain results. +The methods here are used to filter, order, and constrain results. .. autoclass:: ModelQuerySet From 8c79bc5d6030e0371d9ece42930d97fb84a64231 Mon Sep 17 00:00:00 2001 From: Kishan Karunaratne Date: Mon, 27 Jul 2015 16:35:48 -0700 Subject: [PATCH 1612/3726] added doxyfile for Doxygen --- doxyfile_python | 2337 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 2337 insertions(+) create mode 100644 doxyfile_python diff --git a/doxyfile_python b/doxyfile_python new file mode 100644 index 0000000000..01cf58b6e0 --- /dev/null +++ b/doxyfile_python @@ -0,0 +1,2337 @@ +# Doxyfile 1.8.8 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). 
+ +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all text +# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv +# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv +# for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "Python Driver" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify an logo or icon that is included in +# the documentation. The maximum height of the logo should not exceed 55 pixels +# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo +# to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. 
If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = /home/jenkins/workspace/python_docs + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. 
+# The default value is: YES. + +BRIEF_MEMBER_DESC = NO + +# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = NO + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. 
+# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. 
+ +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a +# new page for each member. If set to NO, the documentation of a member will be +# part of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines. 
+ +ALIASES = +ALIASES += expected_errors="\par Expected Errors\n" +ALIASES += jira_ticket="\par JIRA Ticket\n" +ALIASES += expected_result="\par Expected Result\n" +ALIASES += test_assumptions="\par Test Assumptions\n" +ALIASES += note="\par Note\n" +ALIASES += test_category="\par Test Category\n" + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = YES + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. 
The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
+# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
+# Fortran. In the latter case the parser tries to guess whether the code is fixed
+# or free formatted code, this is the default for Fortran type files), VHDL. For
+# instance to make doxygen treat .inc files as Fortran files (default is PHP),
+# and .f files as C (default is Fortran), use: inc=Fortran f=C.
+#
+# Note For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibility issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g.
func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+ +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. 
The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. 
When set to YES local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO these classes will be included in the various overviews. This option has +# no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# (class|struct|union) declarations. If set to NO these declarations will be +# included in the documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. 
If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. +# The default value is: system dependent. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO the members will appear in declaration order. +# The default value is: YES. 
+ +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. 
+ +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the +# todo list. This list is created by putting \todo commands in the +# documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the +# test list. This list is created by putting \test commands in the +# documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. 
The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES the list +# will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. 
You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. See also \cite for info how to create references. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. 
+ +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO doxygen will only warn about wrong or incomplete parameter +# documentation, but not about the absence of documentation. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. +# Note: If this tag is empty the current directory is searched. 
+ +INPUT = /home/jenkins/workspace/python_docs/tests/integration/ + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank the +# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, +# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, +# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, +# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, +# *.qsf, *.as and *.js. + +FILE_PATTERNS = *.py + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. 
+# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = @Test + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command: +# +# +# +# where is the value of the INPUT_FILTER tag, and is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. 
If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. + +INPUT_FILTER = "python /usr/local/bin/doxypy.py" + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER ) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = YES + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. 
+ +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# function all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES, then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. 
+ +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see http://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the config file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. 
+ +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in +# which the alphabetical index list will be split. +# Minimum value: 1, maximum value: 20, default value: 5. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. 
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefor more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra stylesheet files is of importance (e.g. the last +# stylesheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the stylesheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. 
+# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to NO can help when comparing the output of multiple runs. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. 
Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: http://developer.apple.com/tools/xcode/), introduced with +# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. 
+# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
+ +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler ( hhc.exe). If non-empty +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated ( +# YES) or that it should be included in the master .chm file ( NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated ( +# YES) or a normal table of contents ( NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. 
The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. 
If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. 
If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = YES + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. 
When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. +# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# http://www.mathjax.org) which uses client side Javascript for the rendering +# instead of using prerendered bitmaps. Use this if you do not have LaTeX +# installed or if you want to formulas look prettier in the HTML output. When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. See the MathJax site (see: +# http://docs.mathjax.org/en/latest/output.html) for more details. +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility), NativeMML (i.e. MathML) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. 
The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. However, it is strongly recommended to install a local copy of +# MathJax from http://www.mathjax.org before deployment. +# The default value is: http://cdn.mathjax.org/mathjax/latest. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering. For example +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. See the MathJax site +# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. The underlying search engine uses javascript and DHTML and +# should work on any modern browser. Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to +# search using the keyboard; to jump to the search box use + S +# (what the is depends on the OS and browser, but it is typically +# , /